Update vendored dependencies

8  vendor/github.com/cespare/xxhash/v2/.travis.yml  (generated, vendored, new file)
@@ -0,0 +1,8 @@
language: go
go:
  - "1.x"
  - master
env:
  - TAGS=""
  - TAGS="-tags purego"
script: go test $TAGS -v ./...

22  vendor/github.com/cespare/xxhash/v2/LICENSE.txt  (generated, vendored, new file)
@@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

67  vendor/github.com/cespare/xxhash/v2/README.md  (generated, vendored, new file)
@@ -0,0 +1,67 @@
# xxhash

[GoDoc](https://godoc.org/github.com/cespare/xxhash)
[Build Status](https://travis-ci.org/cespare/xxhash)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

This package provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [FreeCache](https://github.com/coocood/freecache)
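
The README above lists the small API of the vendored package; as orientation, a minimal usage sketch might look like the following (the input strings are made up for illustration):

```go
package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

func main() {
    // One-shot hashing of a byte slice or a string.
    fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))
    fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

    // Streaming: Digest implements hash.Hash64, so input can be written
    // incrementally and the 64-bit hash read out at the end.
    d := xxhash.New()
    d.WriteString("hello, ")
    d.Write([]byte("world"))
    fmt.Printf("%016x\n", d.Sum64())
}
```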

3  vendor/github.com/cespare/xxhash/v2/go.mod  (generated, vendored, new file)
@@ -0,0 +1,3 @@
module github.com/cespare/xxhash/v2

go 1.11

0  vendor/github.com/cespare/xxhash/v2/go.sum  (generated, vendored, new file)

236  vendor/github.com/cespare/xxhash/v2/xxhash.go  (generated, vendored, new file)
@@ -0,0 +1,236 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash

import (
    "encoding/binary"
    "errors"
    "math/bits"
)

const (
    prime1 uint64 = 11400714785074694791
    prime2 uint64 = 14029467366897019727
    prime3 uint64 = 1609587929392839161
    prime4 uint64 = 9650029242287828579
    prime5 uint64 = 2870177450012600261
)

// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
    prime1v = prime1
    prime2v = prime2
    prime3v = prime3
    prime4v = prime4
    prime5v = prime5
)

// Digest implements hash.Hash64.
type Digest struct {
    v1    uint64
    v2    uint64
    v3    uint64
    v4    uint64
    total uint64
    mem   [32]byte
    n     int // how much of mem is used
}

// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
    var d Digest
    d.Reset()
    return &d
}

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
    d.v1 = prime1v + prime2
    d.v2 = prime2
    d.v3 = 0
    d.v4 = -prime1v
    d.total = 0
    d.n = 0
}

// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }

// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }

// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
    n = len(b)
    d.total += uint64(n)

    if d.n+n < 32 {
        // This new data doesn't even fill the current block.
        copy(d.mem[d.n:], b)
        d.n += n
        return
    }

    if d.n > 0 {
        // Finish off the partial block.
        copy(d.mem[d.n:], b)
        d.v1 = round(d.v1, u64(d.mem[0:8]))
        d.v2 = round(d.v2, u64(d.mem[8:16]))
        d.v3 = round(d.v3, u64(d.mem[16:24]))
        d.v4 = round(d.v4, u64(d.mem[24:32]))
        b = b[32-d.n:]
        d.n = 0
    }

    if len(b) >= 32 {
        // One or more full blocks left.
        nw := writeBlocks(d, b)
        b = b[nw:]
    }

    // Store any remaining partial block.
    copy(d.mem[:], b)
    d.n = len(b)

    return
}

// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
    s := d.Sum64()
    return append(
        b,
        byte(s>>56),
        byte(s>>48),
        byte(s>>40),
        byte(s>>32),
        byte(s>>24),
        byte(s>>16),
        byte(s>>8),
        byte(s),
    )
}

// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
    var h uint64

    if d.total >= 32 {
        v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
        h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
        h = mergeRound(h, v1)
        h = mergeRound(h, v2)
        h = mergeRound(h, v3)
        h = mergeRound(h, v4)
    } else {
        h = d.v3 + prime5
    }

    h += d.total

    i, end := 0, d.n
    for ; i+8 <= end; i += 8 {
        k1 := round(0, u64(d.mem[i:i+8]))
        h ^= k1
        h = rol27(h)*prime1 + prime4
    }
    if i+4 <= end {
        h ^= uint64(u32(d.mem[i:i+4])) * prime1
        h = rol23(h)*prime2 + prime3
        i += 4
    }
    for i < end {
        h ^= uint64(d.mem[i]) * prime5
        h = rol11(h) * prime1
        i++
    }

    h ^= h >> 33
    h *= prime2
    h ^= h >> 29
    h *= prime3
    h ^= h >> 32

    return h
}

const (
    magic         = "xxh\x06"
    marshaledSize = len(magic) + 8*5 + 32
)

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
    b := make([]byte, 0, marshaledSize)
    b = append(b, magic...)
    b = appendUint64(b, d.v1)
    b = appendUint64(b, d.v2)
    b = appendUint64(b, d.v3)
    b = appendUint64(b, d.v4)
    b = appendUint64(b, d.total)
    b = append(b, d.mem[:d.n]...)
    b = b[:len(b)+len(d.mem)-d.n]
    return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
    if len(b) < len(magic) || string(b[:len(magic)]) != magic {
        return errors.New("xxhash: invalid hash state identifier")
    }
    if len(b) != marshaledSize {
        return errors.New("xxhash: invalid hash state size")
    }
    b = b[len(magic):]
    b, d.v1 = consumeUint64(b)
    b, d.v2 = consumeUint64(b)
    b, d.v3 = consumeUint64(b)
    b, d.v4 = consumeUint64(b)
    b, d.total = consumeUint64(b)
    copy(d.mem[:], b)
    b = b[len(d.mem):]
    d.n = int(d.total % uint64(len(d.mem)))
    return nil
}

func appendUint64(b []byte, x uint64) []byte {
    var a [8]byte
    binary.LittleEndian.PutUint64(a[:], x)
    return append(b, a[:]...)
}

func consumeUint64(b []byte) ([]byte, uint64) {
    x := u64(b)
    return b[8:], x
}

func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }

func round(acc, input uint64) uint64 {
    acc += input * prime2
    acc = rol31(acc)
    acc *= prime1
    return acc
}

func mergeRound(acc, val uint64) uint64 {
    val = round(0, val)
    acc ^= val
    acc = acc*prime1 + prime4
    return acc
}

func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
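
Because Digest (above) implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler via MarshalBinary and UnmarshalBinary, intermediate hash state can be saved and resumed later. A small sketch, with the chunk strings invented for illustration:

```go
package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

func main() {
    d := xxhash.New()
    d.WriteString("first chunk of a long stream")

    // Snapshot the intermediate state (e.g. to persist across restarts).
    state, err := d.MarshalBinary()
    if err != nil {
        panic(err)
    }

    // Later: restore the snapshot into a fresh Digest and keep writing.
    resumed := xxhash.New()
    if err := resumed.UnmarshalBinary(state); err != nil {
        panic(err)
    }
    resumed.WriteString(", second chunk")

    fmt.Printf("%016x\n", resumed.Sum64())
}
```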

13  vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go  (generated, vendored, new file)
@@ -0,0 +1,13 @@
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int

215  vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s  (generated, vendored, new file)
@@ -0,0 +1,215 @@
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Register allocation:
// AX h
// CX pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// R15 prime4v

// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
    MOVQ  (CX), R12  \
    ADDQ  $8, CX     \
    IMULQ R14, R12   \
    ADDQ  R12, r     \
    ROLQ  $31, r     \
    IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
    IMULQ R14, val \
    ROLQ  $31, val \
    IMULQ R13, val \
    XORQ  val, acc \
    IMULQ R13, acc \
    ADDQ  R15, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
    // Load fixed primes.
    MOVQ ·prime1v(SB), R13
    MOVQ ·prime2v(SB), R14
    MOVQ ·prime4v(SB), R15

    // Load slice.
    MOVQ b_base+0(FP), CX
    MOVQ b_len+8(FP), DX
    LEAQ (CX)(DX*1), BX

    // The first loop limit will be len(b)-32.
    SUBQ $32, BX

    // Check whether we have at least one block.
    CMPQ DX, $32
    JLT  noBlocks

    // Set up initial state (v1, v2, v3, v4).
    MOVQ R13, R8
    ADDQ R14, R8
    MOVQ R14, R9
    XORQ R10, R10
    XORQ R11, R11
    SUBQ R13, R11

    // Loop until CX > BX.
blockLoop:
    round(R8)
    round(R9)
    round(R10)
    round(R11)

    CMPQ CX, BX
    JLE  blockLoop

    MOVQ R8, AX
    ROLQ $1, AX
    MOVQ R9, R12
    ROLQ $7, R12
    ADDQ R12, AX
    MOVQ R10, R12
    ROLQ $12, R12
    ADDQ R12, AX
    MOVQ R11, R12
    ROLQ $18, R12
    ADDQ R12, AX

    mergeRound(AX, R8)
    mergeRound(AX, R9)
    mergeRound(AX, R10)
    mergeRound(AX, R11)

    JMP afterBlocks

noBlocks:
    MOVQ ·prime5v(SB), AX

afterBlocks:
    ADDQ DX, AX

    // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
    ADDQ $24, BX

    CMPQ CX, BX
    JG   fourByte

wordLoop:
    // Calculate k1.
    MOVQ  (CX), R8
    ADDQ  $8, CX
    IMULQ R14, R8
    ROLQ  $31, R8
    IMULQ R13, R8

    XORQ  R8, AX
    ROLQ  $27, AX
    IMULQ R13, AX
    ADDQ  R15, AX

    CMPQ CX, BX
    JLE  wordLoop

fourByte:
    ADDQ $4, BX
    CMPQ CX, BX
    JG   singles

    MOVL  (CX), R8
    ADDQ  $4, CX
    IMULQ R13, R8
    XORQ  R8, AX

    ROLQ  $23, AX
    IMULQ R14, AX
    ADDQ  ·prime3v(SB), AX

singles:
    ADDQ $4, BX
    CMPQ CX, BX
    JGE  finalize

singlesLoop:
    MOVBQZX (CX), R12
    ADDQ    $1, CX
    IMULQ   ·prime5v(SB), R12
    XORQ    R12, AX

    ROLQ  $11, AX
    IMULQ R13, AX

    CMPQ CX, BX
    JL   singlesLoop

finalize:
    MOVQ  AX, R12
    SHRQ  $33, R12
    XORQ  R12, AX
    IMULQ R14, AX
    MOVQ  AX, R12
    SHRQ  $29, R12
    XORQ  R12, AX
    IMULQ ·prime3v(SB), AX
    MOVQ  AX, R12
    SHRQ  $32, R12
    XORQ  R12, AX

    MOVQ AX, ret+24(FP)
    RET

// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
    // Load fixed primes needed for round.
    MOVQ ·prime1v(SB), R13
    MOVQ ·prime2v(SB), R14

    // Load slice.
    MOVQ b_base+8(FP), CX
    MOVQ b_len+16(FP), DX
    LEAQ (CX)(DX*1), BX
    SUBQ $32, BX

    // Load vN from d.
    MOVQ d+0(FP), AX
    MOVQ 0(AX), R8   // v1
    MOVQ 8(AX), R9   // v2
    MOVQ 16(AX), R10 // v3
    MOVQ 24(AX), R11 // v4

    // We don't need to check the loop condition here; this function is
    // always called with at least one block of data to process.
blockLoop:
    round(R8)
    round(R9)
    round(R10)
    round(R11)

    CMPQ CX, BX
    JLE  blockLoop

    // Copy vN back to d.
    MOVQ R8, 0(AX)
    MOVQ R9, 8(AX)
    MOVQ R10, 16(AX)
    MOVQ R11, 24(AX)

    // The number of bytes written is CX minus the old base pointer.
    SUBQ b_base+8(FP), CX
    MOVQ CX, ret+32(FP)

    RET

76  vendor/github.com/cespare/xxhash/v2/xxhash_other.go  (generated, vendored, new file)
@@ -0,0 +1,76 @@
// +build !amd64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
    // A simpler version would be
    //   d := New()
    //   d.Write(b)
    //   return d.Sum64()
    // but this is faster, particularly for small inputs.

    n := len(b)
    var h uint64

    if n >= 32 {
        v1 := prime1v + prime2
        v2 := prime2
        v3 := uint64(0)
        v4 := -prime1v
        for len(b) >= 32 {
            v1 = round(v1, u64(b[0:8:len(b)]))
            v2 = round(v2, u64(b[8:16:len(b)]))
            v3 = round(v3, u64(b[16:24:len(b)]))
            v4 = round(v4, u64(b[24:32:len(b)]))
            b = b[32:len(b):len(b)]
        }
        h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
        h = mergeRound(h, v1)
        h = mergeRound(h, v2)
        h = mergeRound(h, v3)
        h = mergeRound(h, v4)
    } else {
        h = prime5
    }

    h += uint64(n)

    i, end := 0, len(b)
    for ; i+8 <= end; i += 8 {
        k1 := round(0, u64(b[i:i+8:len(b)]))
        h ^= k1
        h = rol27(h)*prime1 + prime4
    }
    if i+4 <= end {
        h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
        h = rol23(h)*prime2 + prime3
        i += 4
    }
    for ; i < end; i++ {
        h ^= uint64(b[i]) * prime5
        h = rol11(h) * prime1
    }

    h ^= h >> 33
    h *= prime2
    h ^= h >> 29
    h *= prime3
    h ^= h >> 32

    return h
}

func writeBlocks(d *Digest, b []byte) int {
    v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
    n := len(b)
    for len(b) >= 32 {
        v1 = round(v1, u64(b[0:8:len(b)]))
        v2 = round(v2, u64(b[8:16:len(b)]))
        v3 = round(v3, u64(b[16:24:len(b)]))
        v4 = round(v4, u64(b[24:32:len(b)]))
        b = b[32:len(b):len(b)]
    }
    d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
    return n - len(b)
}

15  vendor/github.com/cespare/xxhash/v2/xxhash_safe.go  (generated, vendored, new file)
@@ -0,0 +1,15 @@
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
    return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
    return d.Write([]byte(s))
}

46  vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go  (generated, vendored, new file)
@@ -0,0 +1,46 @@
// +build !appengine

// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.

package xxhash

import (
    "reflect"
    "unsafe"
)

// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
    var b []byte
    bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
    bh.Len = len(s)
    bh.Cap = len(s)
    return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
    var b []byte
    bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
    bh.Len = len(s)
    bh.Cap = len(s)
    return d.Write(b)
}

201  vendor/github.com/go-logr/logr/LICENSE  (generated, vendored, new file)
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

181  vendor/github.com/go-logr/logr/README.md  (generated, vendored, new file)
@@ -0,0 +1,181 @@
# A more minimal logging API for Go

Before you consider this package, please read [this blog post by the
inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what
he has to say, and it largely aligns with my own experiences. Too many
choices of levels means inconsistent logs.

This package offers a purely abstract interface, based on these ideas but with
a few twists. Code can depend on just this interface and have the actual
logging implementation be injected from callers. Ideally only `main()` knows
what logging implementation is being used.

# Differences from Dave's ideas

The main differences are:

1) Dave basically proposes doing away with the notion of a logging API in favor
of `fmt.Printf()`. I disagree, especially when you consider things like output
locations, timestamps, file and line decorations, and structured logging. I
restrict the API to just 2 types of logs: info and error.

Info logs are things you want to tell the user which are not errors. Error
logs are, well, errors. If your code receives an `error` from a subordinate
function call and is logging that `error` *and not returning it*, use error
logs.

2) Verbosity-levels on info logs. This gives developers a chance to indicate
arbitrary grades of importance for info logs, without assigning names with
semantic meaning such as "warning", "trace", and "debug". Superficially this
may feel very similar, but the primary difference is the lack of semantics.
Because verbosity is a numerical value, it's safe to assume that an app running
with higher verbosity means more (and less important) logs will be generated.

This is a BETA grade API.

There are implementations for the following logging libraries:

- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr)
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
- **log** (the Go standard library logger):
  [stdr](https://github.com/go-logr/stdr)
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)

# FAQ

## Conceptual

## Why structured logging?

- **Structured logs are more easily queryable**: Since you've got
  key-value pairs, it's much easier to query your structured logs for
  particular values by filtering on the contents of a particular key --
  think searching request logs for error codes, Kubernetes reconcilers for
  the name and namespace of the reconciled object, etc.

- **Structured logging makes it easier to have cross-referencable logs**:
  Similarly to searchability, if you maintain conventions around your
  keys, it becomes easy to gather all log lines related to a particular
  concept.

- **Structured logs allow better dimensions of filtering**: if you have
  structure to your logs, you've got more precise control over how much
  information is logged -- you might choose in a particular configuration
  to log certain keys but not others, only log lines where a certain key
  matches a certain value, etc, instead of just having v-levels and names
  to key off of.

- **Structured logs better represent structured data**: sometimes, the
  data that you want to log is inherently structured (think tuple-like
  objects). Structured logs allow you to preserve that structure when
  outputting.

## Why V-levels?

**V-levels give operators an easy way to control the chattiness of log
operations**. V-levels provide a way for a given package to distinguish
the relative importance or verbosity of a given log message. Then, if
a particular logger or package is logging too many messages, the user
of the package can simply change the v-levels for that library.

## Why not more named levels, like Warning?

Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
from Dave's ideas](#differences-from-daves-ideas).

## Why not allow format strings, too?

**Format strings negate many of the benefits of structured logs**:

- They're not easily searchable without resorting to fuzzy searching,
  regular expressions, etc.

- They don't store structured data well, since contents are flattened into
  a string.

- They're not cross-referencable.

- They don't compress easily, since the message is not constant.

  (Unless you turn positional parameters into key-value pairs with numerical
  keys, at which point you've gotten key-value logging with meaningless
  keys.)

## Practical

## Why key-value pairs, and not a map?

Key-value pairs are *much* easier to optimize, especially around
allocations. Zap (a structured logger that inspired logr's interface) has
[performance measurements](https://github.com/uber-go/zap#performance)
that show this quite nicely.

While the interface ends up being a little less obvious, you get
potentially better performance, plus avoid making users type
`map[string]string{}` every time they want to log.

## What if my V-levels differ between libraries?

That's fine. Control your V-levels on a per-logger basis, and use the
`WithName` function to pass different loggers to different libraries.

Generally, you should take care to ensure that you have relatively
consistent V-levels within a given logger, however, as this makes deciding
on what verbosity of logs to request easier.

## But I *really* want to use a format string!

That's not actually a question. Assuming your question is "how do
I convert my mental model of logging with format strings to logging with
constant messages":

1. Figure out what the error actually is, as you'd write in a TL;DR style,
   and use that as a message.

2. For every place you'd write a format specifier, look to the word before
   it, and add that as a key-value pair.

For instance, consider the following examples (all taken from spots in the
Kubernetes codebase), and see the sketch after this file for a compilable
version of the same conversion:

- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
  responseCode, err)` becomes `logger.Error(err, "client returned an
  error", "code", responseCode)`

- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
  seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
  response when requesting url", "attempt", retries, "after
  seconds", seconds, "url", url)`

If you *really* must use a format string, place it as a key value, and
call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to
reflect over type %T")` becomes `logger.Info("unable to reflect over
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
this is necessary should be few and far between.

## How do I choose my V-levels?

This is basically the only hard constraint: increase V-levels to denote
more verbose or more debug-y logs.

Otherwise, you can start out with `0` as "you always want to see this",
`1` as "common logging that you might *possibly* want to turn off", and
`10` as "I would like to performance-test your log collection stack".

Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs).

## How do I choose my keys?

- Make your keys human-readable.
- Constant keys are generally a good idea.
- Be consistent across your codebase.
- Keys should naturally match parts of the message string.

While key names are mostly unrestricted (and spaces are acceptable),
it's generally a good idea to stick to printable ASCII characters, or at
least match the general character set of your log lines.

[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
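
As referenced above, here is a hedged, compilable sketch of the README's format-string-to-structured conversion against the vendored logr.Logger interface; fetchAll and fetch are hypothetical names, and the logger is assumed to be injected by the caller, as the README recommends:

```go
package fetcher

import "github.com/go-logr/logr"

// fetchAll is a hypothetical helper used only to illustrate converting
// format strings into constant messages plus key/value pairs.
func fetchAll(logger logr.Logger, urls []string) {
    for i, url := range urls {
        // Instead of: log.Printf("fetching %s (%d of %d)", url, i+1, len(urls))
        logger.V(1).Info("fetching url", "url", url, "attempt", i+1, "total", len(urls))

        if err := fetch(url); err != nil {
            // Instead of: log.Printf("failed to fetch %s: %v", url, err)
            logger.Error(err, "failed to fetch url", "url", url)
        }
    }
}

// fetch is a placeholder so the sketch is self-contained.
func fetch(url string) error { return nil }
```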

3  vendor/github.com/go-logr/logr/go.mod  (generated, vendored, new file)
@@ -0,0 +1,3 @@
module github.com/go-logr/logr

go 1.14
178
vendor/github.com/go-logr/logr/logr.go
generated
vendored
Normal file
178
vendor/github.com/go-logr/logr/logr.go
generated
vendored
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2019 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package logr defines abstract interfaces for logging. Packages can depend on
|
||||||
|
// these interfaces and callers can implement logging in whatever way is
|
||||||
|
// appropriate.
|
||||||
|
//
|
||||||
|
// This design derives from Dave Cheney's blog:
|
||||||
|
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
||||||
|
//
|
||||||
|
// This is a BETA grade API. Until there is a significant 2nd implementation,
|
||||||
|
// I don't really know how it will change.
|
||||||
|
//
|
||||||
|
// The logging specifically makes it non-trivial to use format strings, to encourage
|
||||||
|
// attaching structured information instead of unstructured format strings.
|
||||||
|
//
|
||||||
|
// Usage
|
||||||
|
//
|
||||||
|
// Logging is done using a Logger. Loggers can have name prefixes and named
|
||||||
|
// values attached, so that all log messages logged with that Logger have some
|
||||||
|
// base context associated.
|
||||||
|
//
|
||||||
|
// The term "key" is used to refer to the name associated with a particular
|
||||||
|
// value, to disambiguate it from the general Logger name.
|
||||||
|
//
|
||||||
|
// For instance, suppose we're trying to reconcile the state of an object, and
|
||||||
|
// we want to log that we've made some decision.
|
||||||
|
//
|
||||||
|
// With the traditional log package, we might write:
|
||||||
|
// log.Printf(
|
||||||
|
// "decided to set field foo to value %q for object %s/%s",
|
||||||
|
// targetValue, object.Namespace, object.Name)
|
||||||
|
//
|
||||||
|
// With logr's structured logging, we'd write:
|
||||||
|
// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers",
|
||||||
|
// // and the named value target-type=Foo, for extra context.
|
||||||
|
// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
|
||||||
|
//
|
||||||
|
// // later on...
|
||||||
|
// log.Info("setting field foo on object", "value", targetValue, "object", object)
|
||||||
|
//
|
||||||
|
// Depending on our logging implementation, we could then make logging decisions
|
||||||
|
// based on field values (like only logging such events for objects in a certain
|
||||||
|
// namespace), or copy the structured information into a structured log store.
|
||||||
|
//
|
||||||
|
// For logging errors, Logger has a method called Error. Suppose we wanted to
|
||||||
|
// log an error while reconciling. With the traditional log package, we might
|
||||||
|
// write:
|
||||||
|
// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err)
|
||||||
|
//
|
||||||
|
// With logr, we'd instead write:
|
||||||
|
// // assuming the above setup for log
|
||||||
|
// log.Error(err, "unable to reconcile object", "object", object)
|
||||||
|
//
|
||||||
|
// This functions similarly to:
|
||||||
|
// log.Info("unable to reconcile object", "error", err, "object", object)
|
||||||
|
//
|
||||||
|
// However, it ensures that a standard key for the error value ("error") is used
|
||||||
|
// across all error logging. Furthermore, certain implementations may choose to
|
||||||
|
// attach additional information (such as stack traces) on calls to Error, so
|
||||||
|
// it's preferred to use Error to log errors.
|
||||||
|
//
|
||||||
|
// Parts of a log line
|
||||||
|
//
|
||||||
|
// Each log message from a Logger has four types of context:
|
||||||
|
// logger name, log verbosity, log message, and the named values.
|
||||||
|
//
|
||||||
|
// The Logger name constists of a series of name "segments" added by successive
|
||||||
|
// calls to WithName. These name segments will be joined in some way by the
|
||||||
|
// underlying implementation. It is strongly reccomended that name segements
|
||||||
|
// contain simple identifiers (letters, digits, and hyphen), and do not contain
|
||||||
|
// characters that could muddle the log output or confuse the joining operation
|
||||||
|
// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc).
|
||||||
|
//
|
||||||
|
// Log verbosity represents how little a log matters. Level zero, the default,
|
||||||
|
// matters most. Increasing levels matter less and less. Try to avoid lots of
|
||||||
|
// different verbosity levels, and instead provide useful keys, logger names,
|
||||||
|
// and log messages for users to filter on. It's illegal to pass a log level
|
||||||
|
// below zero.
|
||||||
|
//
|
||||||
|
// The log message consists of a constant message attached to the the log line.
|
||||||
|
// This should generally be a simple description of what's occuring, and should
|
||||||
|
// never be a format string.
|
||||||
|
//
|
||||||
|
// Variable information can then be attached using named values (key/value
|
||||||
|
// pairs). Keys are arbitrary strings, while values may be any Go value.
|
||||||
|
//
|
||||||
|
// Key Naming Conventions
|
||||||
|
//
|
||||||
|
// Keys are not strictly required to conform to any specification or regex, but
|
||||||
|
// it is recommended that they:
|
||||||
|
// * be human-readable and meaningful (not auto-generated or simple ordinals)
|
||||||
|
// * be constant (not dependent on input data)
// * contain only printable characters
// * not contain whitespace or punctuation
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
// output JSON data or will store data for later database (e.g. SQL) queries.
//
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
//
// - `"caller"`: the calling information (file/line) of a particular log line.
// - `"error"`: the underlying error value in the `Error` method.
// - `"level"`: the log level.
// - `"logger"`: the name of the associated logger.
// - `"msg"`: the log message.
// - `"stacktrace"`: the stack trace associated with a particular log line or
//   error (often from the `Error` message).
// - `"ts"`: the timestamp for a log line.
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
package logr

// TODO: consider adding back in format strings if they're really needed
// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats

// Logger represents the ability to log messages, both errors and not.
type Logger interface {
	// Enabled tests whether this Logger is enabled. For example, commandline
	// flags might be used to set the logging verbosity and disable some info
	// logs.
	Enabled() bool

	// Info logs a non-error message with the given key/value pairs as context.
	//
	// The msg argument should be used to add some constant description to
	// the log line. The key/value pairs can then be used to add additional
	// variable information. The key/value pairs should alternate string
	// keys and arbitrary values.
	Info(msg string, keysAndValues ...interface{})

	// Error logs an error, with the given message and key/value pairs as context.
	// It functions similarly to calling Info with the "error" named value, but may
	// have unique behavior, and should be preferred for logging errors (see the
	// package documentation for more information).
	//
	// The msg field should be used to add context to any underlying error,
	// while the err field should be used to attach the actual error that
	// triggered this log line, if present.
	Error(err error, msg string, keysAndValues ...interface{})

	// V returns a Logger value for a specific verbosity level, relative to
	// this Logger. In other words, V values are additive. A higher verbosity
	// level means a log message is less important. It's illegal to pass a log
	// level less than zero.
	V(level int) Logger

	// WithValues adds some key-value pairs of context to a logger.
	// See Info for documentation on how key/value pairs work.
	WithValues(keysAndValues ...interface{}) Logger

	// WithName adds a new element to the logger's name.
	// Successive calls to WithName continue to append
	// suffixes to the logger's name. It's strongly recommended
	// that name segments contain only letters, digits, and hyphens
	// (see the package documentation for more information).
	WithName(name string) Logger
}
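Not part of the vendored file: a minimal sketch of how calling code might satisfy and use the Logger contract above. The stdoutLogger type, its verbosity cutoff, and its output format are illustrative assumptions, not anything defined by logr; real programs would normally use an existing adapter such as zapr or glogr.

package main

import (
	"errors"
	"fmt"

	"github.com/go-logr/logr"
)

// stdoutLogger is a deliberately tiny, assumed implementation used only to
// illustrate the Logger interface; it is not part of the logr module.
type stdoutLogger struct {
	name   string
	level  int
	values []interface{}
}

// Enabled uses an arbitrary verbosity cutoff chosen for this sketch.
func (l stdoutLogger) Enabled() bool { return l.level <= 1 }

func (l stdoutLogger) Info(msg string, keysAndValues ...interface{}) {
	if !l.Enabled() {
		return
	}
	fmt.Println("INFO", l.name, msg, l.values, keysAndValues)
}

func (l stdoutLogger) Error(err error, msg string, keysAndValues ...interface{}) {
	fmt.Println("ERROR", l.name, msg, "error", err, l.values, keysAndValues)
}

func (l stdoutLogger) V(level int) logr.Logger {
	l.level += level
	return l
}

func (l stdoutLogger) WithValues(keysAndValues ...interface{}) logr.Logger {
	// Copy before appending so derived loggers don't share a backing array.
	l.values = append(append([]interface{}(nil), l.values...), keysAndValues...)
	return l
}

func (l stdoutLogger) WithName(name string) logr.Logger {
	if l.name != "" {
		name = l.name + "." + name
	}
	l.name = name
	return l
}

func main() {
	var log logr.Logger = stdoutLogger{}
	log = log.WithName("controller").WithValues("pod", "kube-dns")

	log.Info("starting reconciliation", "attempt", 1)
	log.V(2).Info("verbose state dump") // suppressed by Enabled in this sketch
	log.Error(errors.New("timeout"), "reconciliation failed")
}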
228
vendor/github.com/golang/protobuf/descriptor/descriptor.go
generated
vendored
228
vendor/github.com/golang/protobuf/descriptor/descriptor.go
generated
vendored
@@ -1,92 +1,184 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
// https://github.com/golang/protobuf
|
// Use of this source code is governed by a BSD-style
|
||||||
//
|
// license that can be found in the LICENSE file.
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Package descriptor provides functions for obtaining protocol buffer
|
// Package descriptor provides functions for obtaining the protocol buffer
|
||||||
// descriptors for generated Go types.
|
// descriptors of generated Go types.
|
||||||
//
|
//
|
||||||
// These functions cannot go in package proto because they depend on the
|
// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package
|
||||||
// generated protobuf descriptor messages, which themselves depend on proto.
|
// for how to obtain an EnumDescriptor or MessageDescriptor in order to
|
||||||
|
// programmatically interact with the protobuf type system.
|
||||||
package descriptor
|
package descriptor
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
"google.golang.org/protobuf/reflect/protodesc"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
|
||||||
|
descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||||
)
|
)
|
||||||
|
|
||||||
// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
|
// Message is proto.Message with a method to return its descriptor.
|
||||||
func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) {
|
|
||||||
r, err := gzip.NewReader(bytes.NewReader(gz))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open gzip reader: %v", err)
|
|
||||||
}
|
|
||||||
defer r.Close()
|
|
||||||
|
|
||||||
b, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fd := new(protobuf.FileDescriptorProto)
|
|
||||||
if err := proto.Unmarshal(b, fd); err != nil {
|
|
||||||
return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fd, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message is a proto.Message with a method to return its descriptor.
|
|
||||||
//
|
//
|
||||||
// Message types generated by the protocol compiler always satisfy
|
// Deprecated: The Descriptor method may not be generated by future
|
||||||
// the Message interface.
|
// versions of protoc-gen-go, meaning that this interface may not
|
||||||
|
// be implemented by many concrete message types.
|
||||||
type Message interface {
|
type Message interface {
|
||||||
proto.Message
|
proto.Message
|
||||||
Descriptor() ([]byte, []int)
|
Descriptor() ([]byte, []int)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
|
// ForMessage returns the file descriptor proto containing
|
||||||
// describing the given message.
|
// the message and the message descriptor proto for the message itself.
|
||||||
func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) {
|
// The returned proto messages must not be mutated.
|
||||||
gz, path := msg.Descriptor()
|
//
|
||||||
fd, err := extractFile(gz)
|
// Deprecated: Not all concrete message types satisfy the Message interface.
|
||||||
if err != nil {
|
// Use MessageDescriptorProto instead. If possible, the calling code should
|
||||||
panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
|
// be rewritten to use protobuf reflection instead.
|
||||||
|
// See package "google.golang.org/protobuf/reflect/protoreflect" for details.
|
||||||
|
func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
|
||||||
|
return MessageDescriptorProto(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
type rawDesc struct {
|
||||||
|
fileDesc []byte
|
||||||
|
indexes []int
|
||||||
|
}
|
||||||
|
|
||||||
|
var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc
|
||||||
|
|
||||||
|
func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) {
|
||||||
|
// Fast-path: check whether raw descriptors are already cached.
|
||||||
|
origDesc := d
|
||||||
|
if v, ok := rawDescCache.Load(origDesc); ok {
|
||||||
|
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
|
||||||
}
|
}
|
||||||
|
|
||||||
md = fd.MessageType[path[0]]
|
// Slow-path: derive the raw descriptor from the v2 descriptor.
|
||||||
for _, i := range path[1:] {
|
|
||||||
|
// Start with the leaf (a given enum or message declaration) and
|
||||||
|
// ascend upwards until we hit the parent file descriptor.
|
||||||
|
var idxs []int
|
||||||
|
for {
|
||||||
|
idxs = append(idxs, d.Index())
|
||||||
|
d = d.Parent()
|
||||||
|
if d == nil {
|
||||||
|
// TODO: We could construct a FileDescriptor stub for standalone
|
||||||
|
// descriptors to satisfy the API.
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if _, ok := d.(protoreflect.FileDescriptor); ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Obtain the raw file descriptor.
|
||||||
|
var raw []byte
|
||||||
|
switch fd := d.(type) {
|
||||||
|
case interface{ ProtoLegacyRawDesc() []byte }:
|
||||||
|
raw = fd.ProtoLegacyRawDesc()
|
||||||
|
case protoreflect.FileDescriptor:
|
||||||
|
raw, _ = proto.Marshal(protodesc.ToFileDescriptorProto(fd))
|
||||||
|
}
|
||||||
|
file := protoimpl.X.CompressGZIP(raw)
|
||||||
|
|
||||||
|
// Reverse the indexes, since we populated them in reverse.
|
||||||
|
for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 {
|
||||||
|
idxs[i], idxs[j] = idxs[j], idxs[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok {
|
||||||
|
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
|
||||||
|
}
|
||||||
|
return file, idxs
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumRawDescriptor returns the GZIP'd raw file descriptor representing
|
||||||
|
// the enum and the index path to reach the enum declaration.
|
||||||
|
// The returned slices must not be mutated.
|
||||||
|
func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) {
|
||||||
|
if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok {
|
||||||
|
return ev.EnumDescriptor()
|
||||||
|
}
|
||||||
|
ed := protoimpl.X.EnumTypeOf(e)
|
||||||
|
return deriveRawDescriptor(ed.Descriptor())
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageRawDescriptor returns the GZIP'd raw file descriptor representing
|
||||||
|
// the message and the index path to reach the message declaration.
|
||||||
|
// The returned slices must not be mutated.
|
||||||
|
func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) {
|
||||||
|
if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok {
|
||||||
|
return mv.Descriptor()
|
||||||
|
}
|
||||||
|
md := protoimpl.X.MessageTypeOf(m)
|
||||||
|
return deriveRawDescriptor(md.Descriptor())
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto
|
||||||
|
|
||||||
|
func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto {
|
||||||
|
// Fast-path: check whether descriptor protos are already cached.
|
||||||
|
if v, ok := fileDescCache.Load(&rawDesc[0]); ok {
|
||||||
|
return v.(*descriptorpb.FileDescriptorProto)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slow-path: derive the descriptor proto from the GZIP'd message.
|
||||||
|
zr, err := gzip.NewReader(bytes.NewReader(rawDesc))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
b, err := ioutil.ReadAll(zr)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
fd := new(descriptorpb.FileDescriptorProto)
|
||||||
|
if err := proto.Unmarshal(b, fd); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok {
|
||||||
|
return v.(*descriptorpb.FileDescriptorProto)
|
||||||
|
}
|
||||||
|
return fd
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumDescriptorProto returns the file descriptor proto representing
|
||||||
|
// the enum and the enum descriptor proto for the enum itself.
|
||||||
|
// The returned proto messages must not be mutated.
|
||||||
|
func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) {
|
||||||
|
rawDesc, idxs := EnumRawDescriptor(e)
|
||||||
|
if rawDesc == nil || idxs == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
fd := deriveFileDescriptor(rawDesc)
|
||||||
|
if len(idxs) == 1 {
|
||||||
|
return fd, fd.EnumType[idxs[0]]
|
||||||
|
}
|
||||||
|
md := fd.MessageType[idxs[0]]
|
||||||
|
for _, i := range idxs[1 : len(idxs)-1] {
|
||||||
|
md = md.NestedType[i]
|
||||||
|
}
|
||||||
|
ed := md.EnumType[idxs[len(idxs)-1]]
|
||||||
|
return fd, ed
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageDescriptorProto returns the file descriptor proto representing
|
||||||
|
// the message and the message descriptor proto for the message itself.
|
||||||
|
// The returned proto messages must not be mutated.
|
||||||
|
func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
|
||||||
|
rawDesc, idxs := MessageRawDescriptor(m)
|
||||||
|
if rawDesc == nil || idxs == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
fd := deriveFileDescriptor(rawDesc)
|
||||||
|
md := fd.MessageType[idxs[0]]
|
||||||
|
for _, i := range idxs[1:] {
|
||||||
md = md.NestedType[i]
|
md = md.NestedType[i]
|
||||||
}
|
}
|
||||||
return fd, md
|
return fd, md
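Not part of the diff: a hedged sketch of calling the MessageDescriptorProto entry point kept by this rewrite. The Duration well-known type is used purely as a convenient generated message; any protoc-gen-go generated type would do.

package main

import (
	"fmt"

	"github.com/golang/protobuf/descriptor"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Look up the file and message descriptor protos for a generated message.
	fd, md := descriptor.MessageDescriptorProto(&durpb.Duration{})
	fmt.Println(fd.GetName()) // file: google/protobuf/duration.proto
	fmt.Println(md.GetName()) // message: Duration
	// Per the package comments, the returned protos must not be mutated.
}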
324
vendor/github.com/golang/protobuf/proto/buffer.go
generated
vendored
Normal file
324
vendor/github.com/golang/protobuf/proto/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,324 @@
|
|||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/encoding/prototext"
|
||||||
|
"google.golang.org/protobuf/encoding/protowire"
|
||||||
|
"google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
WireVarint = 0
|
||||||
|
WireFixed32 = 5
|
||||||
|
WireFixed64 = 1
|
||||||
|
WireBytes = 2
|
||||||
|
WireStartGroup = 3
|
||||||
|
WireEndGroup = 4
|
||||||
|
)
|
||||||
|
|
||||||
|
// EncodeVarint returns the varint encoded bytes of v.
|
||||||
|
func EncodeVarint(v uint64) []byte {
|
||||||
|
return protowire.AppendVarint(nil, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SizeVarint returns the length of the varint encoded bytes of v.
|
||||||
|
// This is equal to len(EncodeVarint(v)).
|
||||||
|
func SizeVarint(v uint64) int {
|
||||||
|
return protowire.SizeVarint(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeVarint parses a varint encoded integer from b,
|
||||||
|
// returning the integer value and the length of the varint.
|
||||||
|
// It returns (0, 0) if there is a parse error.
|
||||||
|
func DecodeVarint(b []byte) (uint64, int) {
|
||||||
|
v, n := protowire.ConsumeVarint(b)
|
||||||
|
if n < 0 {
|
||||||
|
return 0, 0
|
||||||
|
}
|
||||||
|
return v, n
|
||||||
|
}
|
||||||
|
|
||||||
|
// Buffer is a buffer for encoding and decoding the protobuf wire format.
|
||||||
|
// It may be reused between invocations to reduce memory usage.
|
||||||
|
type Buffer struct {
|
||||||
|
buf []byte
|
||||||
|
idx int
|
||||||
|
deterministic bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBuffer allocates a new Buffer initialized with buf,
|
||||||
|
// where the contents of buf are considered the unread portion of the buffer.
|
||||||
|
func NewBuffer(buf []byte) *Buffer {
|
||||||
|
return &Buffer{buf: buf}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDeterministic specifies whether to use deterministic serialization.
|
||||||
|
//
|
||||||
|
// Deterministic serialization guarantees that for a given binary, equal
|
||||||
|
// messages will always be serialized to the same bytes. This implies:
|
||||||
|
//
|
||||||
|
// - Repeated serialization of a message will return the same bytes.
|
||||||
|
// - Different processes of the same binary (which may be executing on
|
||||||
|
// different machines) will serialize equal messages to the same bytes.
|
||||||
|
//
|
||||||
|
// Note that the deterministic serialization is NOT canonical across
|
||||||
|
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||||
|
// across different builds with schema changes due to unknown fields.
|
||||||
|
// Users who need canonical serialization (e.g., persistent storage in a
|
||||||
|
// canonical form, fingerprinting, etc.) should define their own
|
||||||
|
// canonicalization specification and implement their own serializer rather
|
||||||
|
// than relying on this API.
|
||||||
|
//
|
||||||
|
// If deterministic serialization is requested, map entries will be sorted
|
||||||
|
// by keys in lexicographical order. This is an implementation detail and
|
||||||
|
// subject to change.
|
||||||
|
func (b *Buffer) SetDeterministic(deterministic bool) {
|
||||||
|
b.deterministic = deterministic
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBuf sets buf as the internal buffer,
|
||||||
|
// where the contents of buf are considered the unread portion of the buffer.
|
||||||
|
func (b *Buffer) SetBuf(buf []byte) {
|
||||||
|
b.buf = buf
|
||||||
|
b.idx = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset clears the internal buffer of all written and unread data.
|
||||||
|
func (b *Buffer) Reset() {
|
||||||
|
b.buf = b.buf[:0]
|
||||||
|
b.idx = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes returns the internal buffer.
|
||||||
|
func (b *Buffer) Bytes() []byte {
|
||||||
|
return b.buf
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unread returns the unread portion of the buffer.
|
||||||
|
func (b *Buffer) Unread() []byte {
|
||||||
|
return b.buf[b.idx:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal appends the wire-format encoding of m to the buffer.
|
||||||
|
func (b *Buffer) Marshal(m Message) error {
|
||||||
|
var err error
|
||||||
|
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal parses the wire-format message in the buffer and
|
||||||
|
// places the decoded results in m.
|
||||||
|
// It does not reset m before unmarshaling.
|
||||||
|
func (b *Buffer) Unmarshal(m Message) error {
|
||||||
|
err := UnmarshalMerge(b.Unread(), m)
|
||||||
|
b.idx = len(b.buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
|
||||||
|
|
||||||
|
func (m *unknownFields) String() string { panic("not implemented") }
|
||||||
|
func (m *unknownFields) Reset() { panic("not implemented") }
|
||||||
|
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
|
||||||
|
|
||||||
|
// DebugPrint dumps the encoded bytes of b with a header and footer including s
|
||||||
|
// to stdout. This is only intended for debugging.
|
||||||
|
func (*Buffer) DebugPrint(s string, b []byte) {
|
||||||
|
m := MessageReflect(new(unknownFields))
|
||||||
|
m.SetUnknown(b)
|
||||||
|
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
|
||||||
|
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeVarint appends an unsigned varint encoding to the buffer.
|
||||||
|
func (b *Buffer) EncodeVarint(v uint64) error {
|
||||||
|
b.buf = protowire.AppendVarint(b.buf, v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
|
||||||
|
func (b *Buffer) EncodeZigzag32(v uint64) error {
|
||||||
|
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
|
||||||
|
func (b *Buffer) EncodeZigzag64(v uint64) error {
|
||||||
|
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
|
||||||
|
func (b *Buffer) EncodeFixed32(v uint64) error {
|
||||||
|
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
|
||||||
|
func (b *Buffer) EncodeFixed64(v uint64) error {
|
||||||
|
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
|
||||||
|
func (b *Buffer) EncodeRawBytes(v []byte) error {
|
||||||
|
b.buf = protowire.AppendBytes(b.buf, v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
|
||||||
|
// It does not validate whether v contains valid UTF-8.
|
||||||
|
func (b *Buffer) EncodeStringBytes(v string) error {
|
||||||
|
b.buf = protowire.AppendString(b.buf, v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeMessage appends a length-prefixed encoded message to the buffer.
|
||||||
|
func (b *Buffer) EncodeMessage(m Message) error {
|
||||||
|
var err error
|
||||||
|
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
|
||||||
|
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeVarint consumes an encoded unsigned varint from the buffer.
|
||||||
|
func (b *Buffer) DecodeVarint() (uint64, error) {
|
||||||
|
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return 0, protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return uint64(v), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
|
||||||
|
func (b *Buffer) DecodeZigzag32() (uint64, error) {
|
||||||
|
v, err := b.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
|
||||||
|
func (b *Buffer) DecodeZigzag64() (uint64, error) {
|
||||||
|
v, err := b.DecodeVarint()
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
|
||||||
|
func (b *Buffer) DecodeFixed32() (uint64, error) {
|
||||||
|
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return 0, protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return uint64(v), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
|
||||||
|
func (b *Buffer) DecodeFixed64() (uint64, error) {
|
||||||
|
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return 0, protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return uint64(v), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
|
||||||
|
// If alloc is specified, it returns a copy of the raw bytes
|
||||||
|
// rather than a sub-slice of the buffer.
|
||||||
|
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
|
||||||
|
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return nil, protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
if alloc {
|
||||||
|
v = append([]byte(nil), v...)
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
|
||||||
|
// It does not validate whether the raw bytes contain valid UTF-8.
|
||||||
|
func (b *Buffer) DecodeStringBytes() (string, error) {
|
||||||
|
v, n := protowire.ConsumeString(b.buf[b.idx:])
|
||||||
|
if n < 0 {
|
||||||
|
return "", protowire.ParseError(n)
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeMessage consumes a length-prefixed message from the buffer.
|
||||||
|
// It does not reset m before unmarshaling.
|
||||||
|
func (b *Buffer) DecodeMessage(m Message) error {
|
||||||
|
v, err := b.DecodeRawBytes(false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return UnmarshalMerge(v, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeGroup consumes a message group from the buffer.
|
||||||
|
// It assumes that the start group marker has already been consumed and
|
||||||
|
// consumes all bytes until (and including) the end group marker.
|
||||||
|
// It does not reset m before unmarshaling.
|
||||||
|
func (b *Buffer) DecodeGroup(m Message) error {
|
||||||
|
v, n, err := consumeGroup(b.buf[b.idx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b.idx += n
|
||||||
|
return UnmarshalMerge(v, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeGroup parses b until it finds an end group marker, returning
|
||||||
|
// the raw bytes of the message (excluding the end group marker) and the
|
||||||
|
// total length of the message (including the end group marker).
|
||||||
|
func consumeGroup(b []byte) ([]byte, int, error) {
|
||||||
|
b0 := b
|
||||||
|
depth := 1 // assume this follows a start group marker
|
||||||
|
for {
|
||||||
|
_, wtyp, tagLen := protowire.ConsumeTag(b)
|
||||||
|
if tagLen < 0 {
|
||||||
|
return nil, 0, protowire.ParseError(tagLen)
|
||||||
|
}
|
||||||
|
b = b[tagLen:]
|
||||||
|
|
||||||
|
var valLen int
|
||||||
|
switch wtyp {
|
||||||
|
case protowire.VarintType:
|
||||||
|
_, valLen = protowire.ConsumeVarint(b)
|
||||||
|
case protowire.Fixed32Type:
|
||||||
|
_, valLen = protowire.ConsumeFixed32(b)
|
||||||
|
case protowire.Fixed64Type:
|
||||||
|
_, valLen = protowire.ConsumeFixed64(b)
|
||||||
|
case protowire.BytesType:
|
||||||
|
_, valLen = protowire.ConsumeBytes(b)
|
||||||
|
case protowire.StartGroupType:
|
||||||
|
depth++
|
||||||
|
case protowire.EndGroupType:
|
||||||
|
depth--
|
||||||
|
default:
|
||||||
|
return nil, 0, errors.New("proto: cannot parse reserved wire type")
|
||||||
|
}
|
||||||
|
if valLen < 0 {
|
||||||
|
return nil, 0, protowire.ParseError(valLen)
|
||||||
|
}
|
||||||
|
b = b[valLen:]
|
||||||
|
|
||||||
|
if depth == 0 {
|
||||||
|
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
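Not part of the vendored file: a brief hedged sketch that round-trips primitive values through the legacy Buffer API defined above; the values chosen are arbitrary.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Encode a varint and a length-prefixed string into one buffer.
	enc := proto.NewBuffer(nil)
	_ = enc.EncodeVarint(300)
	_ = enc.EncodeStringBytes("hello")

	// Decode them back in the same order they were written.
	dec := proto.NewBuffer(enc.Bytes())
	n, err := dec.DecodeVarint()
	if err != nil {
		panic(err)
	}
	s, err := dec.DecodeStringBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(n, s) // 300 hello
}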
253
vendor/github.com/golang/protobuf/proto/clone.go
generated
vendored
253
vendor/github.com/golang/protobuf/proto/clone.go
generated
vendored
@@ -1,253 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Protocol buffer deep copy and merge.
|
|
||||||
// TODO: RawMessage.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Clone returns a deep copy of a protocol buffer.
|
|
||||||
func Clone(src Message) Message {
|
|
||||||
in := reflect.ValueOf(src)
|
|
||||||
if in.IsNil() {
|
|
||||||
return src
|
|
||||||
}
|
|
||||||
out := reflect.New(in.Type().Elem())
|
|
||||||
dst := out.Interface().(Message)
|
|
||||||
Merge(dst, src)
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merger is the interface representing objects that can merge messages of the same type.
|
|
||||||
type Merger interface {
|
|
||||||
// Merge merges src into this message.
|
|
||||||
// Required and optional fields that are set in src will be set to that value in dst.
|
|
||||||
// Elements of repeated fields will be appended.
|
|
||||||
//
|
|
||||||
// Merge may panic if called with a different argument type than the receiver.
|
|
||||||
Merge(src Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// generatedMerger is the custom merge method that generated protos will have.
|
|
||||||
// We must add this method since a generated Merge method will conflict with
|
|
||||||
// many existing protos that have a Merge data field already defined.
|
|
||||||
type generatedMerger interface {
|
|
||||||
XXX_Merge(src Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge merges src into dst.
|
|
||||||
// Required and optional fields that are set in src will be set to that value in dst.
|
|
||||||
// Elements of repeated fields will be appended.
|
|
||||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
|
||||||
func Merge(dst, src Message) {
|
|
||||||
if m, ok := dst.(Merger); ok {
|
|
||||||
m.Merge(src)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
in := reflect.ValueOf(src)
|
|
||||||
out := reflect.ValueOf(dst)
|
|
||||||
if out.IsNil() {
|
|
||||||
panic("proto: nil destination")
|
|
||||||
}
|
|
||||||
if in.Type() != out.Type() {
|
|
||||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
|
||||||
}
|
|
||||||
if in.IsNil() {
|
|
||||||
return // Merge from nil src is a noop
|
|
||||||
}
|
|
||||||
if m, ok := dst.(generatedMerger); ok {
|
|
||||||
m.XXX_Merge(src)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
mergeStruct(out.Elem(), in.Elem())
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeStruct(out, in reflect.Value) {
|
|
||||||
sprop := GetProperties(in.Type())
|
|
||||||
for i := 0; i < in.NumField(); i++ {
|
|
||||||
f := in.Type().Field(i)
|
|
||||||
if strings.HasPrefix(f.Name, "XXX_") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
|
||||||
emOut, _ := extendable(out.Addr().Interface())
|
|
||||||
mIn, muIn := emIn.extensionsRead()
|
|
||||||
if mIn != nil {
|
|
||||||
mOut := emOut.extensionsWrite()
|
|
||||||
muIn.Lock()
|
|
||||||
mergeExtension(mOut, mIn)
|
|
||||||
muIn.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
uf := in.FieldByName("XXX_unrecognized")
|
|
||||||
if !uf.IsValid() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
uin := uf.Bytes()
|
|
||||||
if len(uin) > 0 {
|
|
||||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// mergeAny performs a merge between two values of the same type.
|
|
||||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
|
||||||
// prop is set if this is a struct field (it may be nil).
|
|
||||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
|
||||||
if in.Type() == protoMessageType {
|
|
||||||
if !in.IsNil() {
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
|
||||||
} else {
|
|
||||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch in.Kind() {
|
|
||||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
|
||||||
if !viaPtr && isProto3Zero(in) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
out.Set(in)
|
|
||||||
case reflect.Interface:
|
|
||||||
// Probably a oneof field; copy non-nil values.
|
|
||||||
if in.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Allocate destination if it is not set, or set to a different type.
|
|
||||||
// Otherwise we will merge as normal.
|
|
||||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
|
||||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
|
||||||
}
|
|
||||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
|
||||||
case reflect.Map:
|
|
||||||
if in.Len() == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.MakeMap(in.Type()))
|
|
||||||
}
|
|
||||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
|
||||||
elemKind := in.Type().Elem().Kind()
|
|
||||||
for _, key := range in.MapKeys() {
|
|
||||||
var val reflect.Value
|
|
||||||
switch elemKind {
|
|
||||||
case reflect.Ptr:
|
|
||||||
val = reflect.New(in.Type().Elem().Elem())
|
|
||||||
mergeAny(val, in.MapIndex(key), false, nil)
|
|
||||||
case reflect.Slice:
|
|
||||||
val = in.MapIndex(key)
|
|
||||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
|
||||||
default:
|
|
||||||
val = in.MapIndex(key)
|
|
||||||
}
|
|
||||||
out.SetMapIndex(key, val)
|
|
||||||
}
|
|
||||||
case reflect.Ptr:
|
|
||||||
if in.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.New(in.Elem().Type()))
|
|
||||||
}
|
|
||||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
|
||||||
case reflect.Slice:
|
|
||||||
if in.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
|
||||||
// []byte is a scalar bytes field, not a repeated field.
|
|
||||||
|
|
||||||
// Edge case: if this is in a proto3 message, a zero length
|
|
||||||
// bytes field is considered the zero value, and should not
|
|
||||||
// be merged.
|
|
||||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make a deep copy.
|
|
||||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
|
||||||
// with a nil result.
|
|
||||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
n := in.Len()
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
|
||||||
}
|
|
||||||
switch in.Type().Elem().Kind() {
|
|
||||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
|
||||||
out.Set(reflect.AppendSlice(out, in))
|
|
||||||
default:
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
|
||||||
mergeAny(x, in.Index(i), false, nil)
|
|
||||||
out.Set(reflect.Append(out, x))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
mergeStruct(out, in)
|
|
||||||
default:
|
|
||||||
// unknown type, so not a protocol buffer
|
|
||||||
log.Printf("proto: don't know how to copy %v", in)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeExtension(out, in map[int32]Extension) {
|
|
||||||
for extNum, eIn := range in {
|
|
||||||
eOut := Extension{desc: eIn.desc}
|
|
||||||
if eIn.value != nil {
|
|
||||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
|
||||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
|
||||||
eOut.value = v.Interface()
|
|
||||||
}
|
|
||||||
if eIn.enc != nil {
|
|
||||||
eOut.enc = make([]byte, len(eIn.enc))
|
|
||||||
copy(eOut.enc, eIn.enc)
|
|
||||||
}
|
|
||||||
|
|
||||||
out[extNum] = eOut
|
|
||||||
}
|
|
||||||
}
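Not part of the diff: clone.go is removed here, but proto.Clone and proto.Merge remain part of the package API. A hedged sketch of the merge semantics the removed comments describe, using the Duration well-known type purely as an example.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := &durpb.Duration{Seconds: 90}
	dst := &durpb.Duration{Nanos: 500}

	// Fields set in src overwrite the corresponding fields in dst;
	// fields unset in src leave dst untouched.
	proto.Merge(dst, src)
	fmt.Println(dst.GetSeconds(), dst.GetNanos()) // 90 500

	// Clone returns a deep copy that can be mutated independently.
	cp := proto.Clone(src).(*durpb.Duration)
	cp.Seconds = 1
	fmt.Println(src.GetSeconds(), cp.GetSeconds()) // 90 1
}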
427
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
427
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
@@ -1,427 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// errOverflow is returned when an integer is too large to be represented.
|
|
||||||
var errOverflow = errors.New("proto: integer overflow")
|
|
||||||
|
|
||||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
|
||||||
// wire type is encountered. It does not get returned to user code.
|
|
||||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
|
||||||
|
|
||||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
|
||||||
// It returns the integer and the number of bytes consumed, or
|
|
||||||
// zero if there is not enough.
|
|
||||||
// This is the format for the
|
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
|
||||||
// protocol buffer types.
|
|
||||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
|
||||||
for shift := uint(0); shift < 64; shift += 7 {
|
|
||||||
if n >= len(buf) {
|
|
||||||
return 0, 0
|
|
||||||
}
|
|
||||||
b := uint64(buf[n])
|
|
||||||
n++
|
|
||||||
x |= (b & 0x7F) << shift
|
|
||||||
if (b & 0x80) == 0 {
|
|
||||||
return x, n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The number is too large to represent in a 64-bit value.
|
|
||||||
return 0, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
|
||||||
i := p.index
|
|
||||||
l := len(p.buf)
|
|
||||||
|
|
||||||
for shift := uint(0); shift < 64; shift += 7 {
|
|
||||||
if i >= l {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b := p.buf[i]
|
|
||||||
i++
|
|
||||||
x |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
p.index = i
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The number is too large to represent in a 64-bit value.
|
|
||||||
err = errOverflow
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
|
||||||
// protocol buffer types.
|
|
||||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
|
||||||
i := p.index
|
|
||||||
buf := p.buf
|
|
||||||
|
|
||||||
if i >= len(buf) {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
} else if buf[i] < 0x80 {
|
|
||||||
p.index++
|
|
||||||
return uint64(buf[i]), nil
|
|
||||||
} else if len(buf)-i < 10 {
|
|
||||||
return p.decodeVarintSlow()
|
|
||||||
}
|
|
||||||
|
|
||||||
var b uint64
|
|
||||||
// we already checked the first byte
|
|
||||||
x = uint64(buf[i]) - 0x80
|
|
||||||
i++
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 7
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 7
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 14
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 14
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 21
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 21
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 28
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 28
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 35
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 35
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 42
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 42
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 49
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 49
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 56
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 56
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 63
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, errOverflow
|
|
||||||
|
|
||||||
done:
|
|
||||||
p.index = i
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// fixed64, sfixed64, and double protocol buffer types.
|
|
||||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
|
||||||
// x, err already 0
|
|
||||||
i := p.index + 8
|
|
||||||
if i < 0 || i > len(p.buf) {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.index = i
|
|
||||||
|
|
||||||
x = uint64(p.buf[i-8])
|
|
||||||
x |= uint64(p.buf[i-7]) << 8
|
|
||||||
x |= uint64(p.buf[i-6]) << 16
|
|
||||||
x |= uint64(p.buf[i-5]) << 24
|
|
||||||
x |= uint64(p.buf[i-4]) << 32
|
|
||||||
x |= uint64(p.buf[i-3]) << 40
|
|
||||||
x |= uint64(p.buf[i-2]) << 48
|
|
||||||
x |= uint64(p.buf[i-1]) << 56
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// fixed32, sfixed32, and float protocol buffer types.
|
|
||||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
|
||||||
// x, err already 0
|
|
||||||
i := p.index + 4
|
|
||||||
if i < 0 || i > len(p.buf) {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.index = i
|
|
||||||
|
|
||||||
x = uint64(p.buf[i-4])
|
|
||||||
x |= uint64(p.buf[i-3]) << 8
|
|
||||||
x |= uint64(p.buf[i-2]) << 16
|
|
||||||
x |= uint64(p.buf[i-1]) << 24
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
|
||||||
// from the Buffer.
|
|
||||||
// This is the format used for the sint64 protocol buffer type.
|
|
||||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
|
||||||
x, err = p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
|
||||||
// from the Buffer.
|
|
||||||
// This is the format used for the sint32 protocol buffer type.
|
|
||||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
|
||||||
x, err = p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
|
||||||
// This is the format used for the bytes protocol buffer
|
|
||||||
// type and for embedded messages.
|
|
||||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
|
||||||
n, err := p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
nb := int(n)
|
|
||||||
if nb < 0 {
|
|
||||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
|
||||||
}
|
|
||||||
end := p.index + nb
|
|
||||||
if end < p.index || end > len(p.buf) {
|
|
||||||
return nil, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if !alloc {
|
|
||||||
// todo: check if can get more uses of alloc=false
|
|
||||||
buf = p.buf[p.index:end]
|
|
||||||
p.index += nb
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
buf = make([]byte, nb)
|
|
||||||
copy(buf, p.buf[p.index:])
|
|
||||||
p.index += nb
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
|
||||||
// This is the format used for the proto2 string type.
|
|
||||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
|
||||||
buf, err := p.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshaler is the interface representing objects that can
|
|
||||||
// unmarshal themselves. The argument points to data that may be
|
|
||||||
// overwritten, so implementations should not keep references to the
|
|
||||||
// buffer.
|
|
||||||
// Unmarshal implementations should not clear the receiver.
|
|
||||||
// Any unmarshaled data should be merged into the receiver.
|
|
||||||
// Callers of Unmarshal that do not want to retain existing data
|
|
||||||
// should Reset the receiver before calling Unmarshal.
|
|
||||||
type Unmarshaler interface {
|
|
||||||
Unmarshal([]byte) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// newUnmarshaler is the interface representing objects that can
|
|
||||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
|
||||||
//
|
|
||||||
// This exists to support protoc-gen-go generated messages.
|
|
||||||
// The proto package will stop type-asserting to this interface in the future.
|
|
||||||
//
|
|
||||||
// DO NOT DEPEND ON THIS.
|
|
||||||
type newUnmarshaler interface {
|
|
||||||
XXX_Unmarshal([]byte) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
|
||||||
// decoded result in pb. If the struct underlying pb does not match
|
|
||||||
// the data in buf, the results can be unpredictable.
|
|
||||||
//
|
|
||||||
// Unmarshal resets pb before starting to unmarshal, so any
|
|
||||||
// existing data in pb is always removed. Use UnmarshalMerge
|
|
||||||
// to preserve and append to existing data.
|
|
||||||
func Unmarshal(buf []byte, pb Message) error {
|
|
||||||
pb.Reset()
|
|
||||||
if u, ok := pb.(newUnmarshaler); ok {
|
|
||||||
return u.XXX_Unmarshal(buf)
|
|
||||||
}
|
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
|
||||||
return u.Unmarshal(buf)
|
|
||||||
}
|
|
||||||
return NewBuffer(buf).Unmarshal(pb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
|
||||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
|
||||||
// the data in buf, the results can be unpredictable.
|
|
||||||
//
|
|
||||||
// UnmarshalMerge merges into existing data in pb.
|
|
||||||
// Most code should use Unmarshal instead.
|
|
||||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
|
||||||
if u, ok := pb.(newUnmarshaler); ok {
|
|
||||||
return u.XXX_Unmarshal(buf)
|
|
||||||
}
|
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
|
||||||
// NOTE: The history of proto has unfortunately been inconsistent
|
|
||||||
// whether Unmarshaler should or should not implicitly clear itself.
|
|
||||||
// Some implementations do, most do not.
|
|
||||||
// Thus, calling this here may or may not do what people want.
|
|
||||||
//
|
|
||||||
// See https://github.com/golang/protobuf/issues/424
|
|
||||||
return u.Unmarshal(buf)
|
|
||||||
}
|
|
||||||
return NewBuffer(buf).Unmarshal(pb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
|
||||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
|
||||||
enc, err := p.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return NewBuffer(enc).Unmarshal(pb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
|
||||||
// StartGroup tag is already consumed. This function consumes
|
|
||||||
// EndGroup tag.
|
|
||||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
|
||||||
b := p.buf[p.index:]
|
|
||||||
x, y := findEndGroup(b)
|
|
||||||
if x < 0 {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
err := Unmarshal(b[:x], pb)
|
|
||||||
p.index += y
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal parses the protocol buffer representation in the
|
|
||||||
// Buffer and places the decoded result in pb. If the struct
|
|
||||||
// underlying pb does not match the data in the buffer, the results can be
|
|
||||||
// unpredictable.
|
|
||||||
//
|
|
||||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
|
||||||
func (p *Buffer) Unmarshal(pb Message) error {
|
|
||||||
// If the object can unmarshal itself, let it.
|
|
||||||
if u, ok := pb.(newUnmarshaler); ok {
|
|
||||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
|
||||||
p.index = len(p.buf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
|
||||||
// NOTE: The history of proto has unfortunately been inconsistent
|
|
||||||
// whether Unmarshaler should or should not implicitly clear itself.
|
|
||||||
// Some implementations do, most do not.
|
|
||||||
// Thus, calling this here may or may not do what people want.
|
|
||||||
//
|
|
||||||
// See https://github.com/golang/protobuf/issues/424
|
|
||||||
err := u.Unmarshal(p.buf[p.index:])
|
|
||||||
p.index = len(p.buf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Slow workaround for messages that aren't Unmarshalers.
|
|
||||||
// This includes some hand-coded .pb.go files and
|
|
||||||
// bootstrap protos.
|
|
||||||
// TODO: fix all of those and then add Unmarshal to
|
|
||||||
// the Message interface. Then:
|
|
||||||
// The cast above and code below can be deleted.
|
|
||||||
// The old unmarshaler can be deleted.
|
|
||||||
// Clients can call Unmarshal directly (can already do that, actually).
|
|
||||||
var info InternalMessageInfo
|
|
||||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
|
||||||
p.index = len(p.buf)
|
|
||||||
return err
|
|
||||||
}
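Not part of the diff: a short hedged sketch of the varint wire format that both the removed hand-rolled decoder and its protowire-based replacement parse; 300 encodes to the two bytes AC 02.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 in varint form: 0xAC 0x02 (little-endian groups of 7 bits,
	// with the high bit of each byte set while more bytes follow).
	raw := []byte{0xAC, 0x02}

	v, n := proto.DecodeVarint(raw)
	fmt.Println(v, n) // 300 2

	// EncodeVarint is the inverse.
	fmt.Printf("% X\n", proto.EncodeVarint(300)) // AC 02
}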
63
vendor/github.com/golang/protobuf/proto/defaults.go
generated
vendored
Normal file
63
vendor/github.com/golang/protobuf/proto/defaults.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"google.golang.org/protobuf/reflect/protoreflect"
)

// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
	if m != nil {
		setDefaults(MessageReflect(m))
	}
}

func setDefaults(m protoreflect.Message) {
	fds := m.Descriptor().Fields()
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		if !m.Has(fd) {
			if fd.HasDefault() && fd.ContainingOneof() == nil {
				v := fd.Default()
				if fd.Kind() == protoreflect.BytesKind {
					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
				}
				m.Set(fd, v)
			}
			continue
		}
	}

	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		// Handle singular message.
		case fd.Cardinality() != protoreflect.Repeated:
			if fd.Message() != nil {
				setDefaults(m.Get(fd).Message())
			}
		// Handle list of messages.
		case fd.IsList():
			if fd.Message() != nil {
				ls := m.Get(fd).List()
				for i := 0; i < ls.Len(); i++ {
					setDefaults(ls.Get(i).Message())
				}
			}
		// Handle map of messages.
		case fd.IsMap():
			if fd.MapValue().Message() != nil {
				ms := m.Get(fd).Map()
				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
					setDefaults(v.Message())
					return true
				})
			}
		}
		return true
	})
}
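Not part of the vendored file: a hedged sketch of the protoreflect Range traversal that setDefaults builds on, shown against a well-known message chosen only for illustration.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	m := proto.MessageReflect(&durpb.Duration{Seconds: 90, Nanos: 1})

	// Range visits only populated fields, the same walk setDefaults uses
	// to recurse into message-typed values.
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fmt.Println(fd.Name(), v.Interface())
		return true
	})
}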
124
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
124
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
@@ -1,63 +1,113 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2018 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package proto
 
-import "errors"
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+
+	protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+	// Deprecated: No longer returned.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// Deprecated: No longer returned.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+	// Deprecated: No longer returned.
+	ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func GetStats() Stats { return Stats{} }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func MarshalMessageSet(interface{}) ([]byte, error) {
 	return nil, errors.New("proto: not implemented")
 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func UnmarshalMessageSet([]byte, interface{}) error {
 	return errors.New("proto: not implemented")
 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func MarshalMessageSetJSON(interface{}) ([]byte, error) {
 	return nil, errors.New("proto: not implemented")
 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func UnmarshalMessageSetJSON([]byte, interface{}) error {
 	return errors.New("proto: not implemented")
 }
 
-// Deprecated: do not use.
+// Deprecated: Do not use.
 func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
+
+// Deprecated: Do not use; this type existed for intenal-use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+	DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+	return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+	protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) Size(m Message) int {
+	return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+	return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
356
vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
@@ -1,48 +1,13 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package proto
 
 import (
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"sync/atomic"
+	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
-type generatedDiscarder interface {
-	XXX_DiscardUnknown()
-}
-
 // DiscardUnknown recursively discards all unknown fields from this message
 // and all embedded messages.
 //
@@ -51,300 +16,43 @@ type generatedDiscarder interface {
|
|||||||
// marshal to be able to produce a message that continues to have those
|
// marshal to be able to produce a message that continues to have those
|
||||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||||
// explicitly clear the unknown fields after unmarshaling.
|
// explicitly clear the unknown fields after unmarshaling.
|
||||||
//
|
|
||||||
// For proto2 messages, the unknown fields of message extensions are only
|
|
||||||
// discarded from messages that have been accessed via GetExtension.
|
|
||||||
func DiscardUnknown(m Message) {
|
func DiscardUnknown(m Message) {
|
||||||
if m, ok := m.(generatedDiscarder); ok {
|
if m != nil {
|
||||||
m.XXX_DiscardUnknown()
|
discardUnknown(MessageReflect(m))
|
||||||
return
|
|
||||||
}
|
|
||||||
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
|
|
||||||
// but the master branch has no implementation for InternalMessageInfo,
|
|
||||||
// so it would be more work to replicate that approach.
|
|
||||||
discardLegacy(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DiscardUnknown recursively discards all unknown fields.
|
|
||||||
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
|
||||||
di := atomicLoadDiscardInfo(&a.discard)
|
|
||||||
if di == nil {
|
|
||||||
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
|
||||||
atomicStoreDiscardInfo(&a.discard, di)
|
|
||||||
}
|
|
||||||
di.discard(toPointer(&m))
|
|
||||||
}
|
|
||||||
|
|
||||||
type discardInfo struct {
|
|
||||||
typ reflect.Type
|
|
||||||
|
|
||||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
|
||||||
lock sync.Mutex
|
|
||||||
|
|
||||||
fields []discardFieldInfo
|
|
||||||
unrecognized field
|
|
||||||
}
|
|
||||||
|
|
||||||
type discardFieldInfo struct {
|
|
||||||
field field // Offset of field, guaranteed to be valid
|
|
||||||
discard func(src pointer)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
discardInfoMap = map[reflect.Type]*discardInfo{}
|
|
||||||
discardInfoLock sync.Mutex
|
|
||||||
)
|
|
||||||
|
|
||||||
func getDiscardInfo(t reflect.Type) *discardInfo {
|
|
||||||
discardInfoLock.Lock()
|
|
||||||
defer discardInfoLock.Unlock()
|
|
||||||
di := discardInfoMap[t]
|
|
||||||
if di == nil {
|
|
||||||
di = &discardInfo{typ: t}
|
|
||||||
discardInfoMap[t] = di
|
|
||||||
}
|
|
||||||
return di
|
|
||||||
}
|
|
||||||
|
|
||||||
func (di *discardInfo) discard(src pointer) {
|
|
||||||
if src.isNil() {
|
|
||||||
return // Nothing to do.
|
|
||||||
}
|
|
||||||
|
|
||||||
if atomic.LoadInt32(&di.initialized) == 0 {
|
|
||||||
di.computeDiscardInfo()
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fi := range di.fields {
|
|
||||||
sfp := src.offset(fi.field)
|
|
||||||
fi.discard(sfp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For proto2 messages, only discard unknown fields in message extensions
|
|
||||||
// that have been accessed via GetExtension.
|
|
||||||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
|
||||||
// Ignore lock since DiscardUnknown is not concurrency safe.
|
|
||||||
emm, _ := em.extensionsRead()
|
|
||||||
for _, mx := range emm {
|
|
||||||
if m, ok := mx.value.(Message); ok {
|
|
||||||
DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if di.unrecognized.IsValid() {
|
|
||||||
*src.offset(di.unrecognized).toBytes() = nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (di *discardInfo) computeDiscardInfo() {
|
func discardUnknown(m protoreflect.Message) {
|
||||||
di.lock.Lock()
|
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
|
||||||
defer di.lock.Unlock()
|
|
||||||
if di.initialized != 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t := di.typ
|
|
||||||
n := t.NumField()
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
f := t.Field(i)
|
|
||||||
if strings.HasPrefix(f.Name, "XXX_") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
dfi := discardFieldInfo{field: toField(&f)}
|
|
||||||
tf := f.Type
|
|
||||||
|
|
||||||
// Unwrap tf to get its most basic type.
|
|
||||||
var isPointer, isSlice bool
|
|
||||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
|
||||||
isSlice = true
|
|
||||||
tf = tf.Elem()
|
|
||||||
}
|
|
||||||
if tf.Kind() == reflect.Ptr {
|
|
||||||
isPointer = true
|
|
||||||
tf = tf.Elem()
|
|
||||||
}
|
|
||||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
|
||||||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
|
|
||||||
}
|
|
||||||
|
|
||||||
switch tf.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
switch {
|
switch {
|
||||||
case !isPointer:
|
// Handle singular message.
|
||||||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
|
case fd.Cardinality() != protoreflect.Repeated:
|
||||||
case isSlice: // E.g., []*pb.T
|
if fd.Message() != nil {
|
||||||
di := getDiscardInfo(tf)
|
discardUnknown(m.Get(fd).Message())
|
||||||
dfi.discard = func(src pointer) {
|
}
|
||||||
sps := src.getPointerSlice()
|
// Handle list of messages.
|
||||||
for _, sp := range sps {
|
case fd.IsList():
|
||||||
if !sp.isNil() {
|
if fd.Message() != nil {
|
||||||
di.discard(sp)
|
ls := m.Get(fd).List()
|
||||||
|
for i := 0; i < ls.Len(); i++ {
|
||||||
|
discardUnknown(ls.Get(i).Message())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
// Handle map of messages.
|
||||||
default: // E.g., *pb.T
|
case fd.IsMap():
|
||||||
di := getDiscardInfo(tf)
|
if fd.MapValue().Message() != nil {
|
||||||
dfi.discard = func(src pointer) {
|
ms := m.Get(fd).Map()
|
||||||
sp := src.getPointer()
|
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||||
if !sp.isNil() {
|
discardUnknown(v.Message())
|
||||||
di.discard(sp)
|
return true
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
return true
|
||||||
case reflect.Map:
|
})
|
||||||
switch {
|
|
||||||
case isPointer || isSlice:
|
|
||||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
|
|
||||||
default: // E.g., map[K]V
|
|
||||||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
|
|
||||||
dfi.discard = func(src pointer) {
|
|
||||||
sm := src.asPointerTo(tf).Elem()
|
|
||||||
if sm.Len() == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, key := range sm.MapKeys() {
|
|
||||||
val := sm.MapIndex(key)
|
|
||||||
DiscardUnknown(val.Interface().(Message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dfi.discard = func(pointer) {} // Noop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Interface:
|
|
||||||
// Must be oneof field.
|
|
||||||
switch {
|
|
||||||
case isPointer || isSlice:
|
|
||||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
|
|
||||||
default: // E.g., interface{}
|
|
||||||
// TODO: Make this faster?
|
|
||||||
dfi.discard = func(src pointer) {
|
|
||||||
su := src.asPointerTo(tf).Elem()
|
|
||||||
if !su.IsNil() {
|
|
||||||
sv := su.Elem().Elem().Field(0)
|
|
||||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch sv.Type().Kind() {
|
|
||||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
|
||||||
DiscardUnknown(sv.Interface().(Message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
di.fields = append(di.fields, dfi)
|
|
||||||
}
|
|
||||||
|
|
||||||
di.unrecognized = invalidField
|
// Discard unknown fields.
|
||||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
if len(m.GetUnknown()) > 0 {
|
||||||
if f.Type != reflect.TypeOf([]byte{}) {
|
m.SetUnknown(nil)
|
||||||
panic("expected XXX_unrecognized to be of type []byte")
|
|
||||||
}
|
|
||||||
di.unrecognized = toField(&f)
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic.StoreInt32(&di.initialized, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func discardLegacy(m Message) {
|
|
||||||
v := reflect.ValueOf(m)
|
|
||||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
v = v.Elem()
|
|
||||||
if v.Kind() != reflect.Struct {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t := v.Type()
|
|
||||||
|
|
||||||
for i := 0; i < v.NumField(); i++ {
|
|
||||||
f := t.Field(i)
|
|
||||||
if strings.HasPrefix(f.Name, "XXX_") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
vf := v.Field(i)
|
|
||||||
tf := f.Type
|
|
||||||
|
|
||||||
// Unwrap tf to get its most basic type.
|
|
||||||
var isPointer, isSlice bool
|
|
||||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
|
||||||
isSlice = true
|
|
||||||
tf = tf.Elem()
|
|
||||||
}
|
|
||||||
if tf.Kind() == reflect.Ptr {
|
|
||||||
isPointer = true
|
|
||||||
tf = tf.Elem()
|
|
||||||
}
|
|
||||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
|
||||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
|
||||||
}
|
|
||||||
|
|
||||||
switch tf.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
switch {
|
|
||||||
case !isPointer:
|
|
||||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
|
||||||
case isSlice: // E.g., []*pb.T
|
|
||||||
for j := 0; j < vf.Len(); j++ {
|
|
||||||
discardLegacy(vf.Index(j).Interface().(Message))
|
|
||||||
}
|
|
||||||
default: // E.g., *pb.T
|
|
||||||
discardLegacy(vf.Interface().(Message))
|
|
||||||
}
|
|
||||||
case reflect.Map:
|
|
||||||
switch {
|
|
||||||
case isPointer || isSlice:
|
|
||||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
|
||||||
default: // E.g., map[K]V
|
|
||||||
tv := vf.Type().Elem()
|
|
||||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
|
||||||
for _, key := range vf.MapKeys() {
|
|
||||||
val := vf.MapIndex(key)
|
|
||||||
discardLegacy(val.Interface().(Message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Interface:
|
|
||||||
// Must be oneof field.
|
|
||||||
switch {
|
|
||||||
case isPointer || isSlice:
|
|
||||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
|
||||||
default: // E.g., test_proto.isCommunique_Union interface
|
|
||||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
|
||||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
|
||||||
if !vf.IsNil() {
|
|
||||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
|
||||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
|
||||||
if vf.Kind() == reflect.Ptr {
|
|
||||||
discardLegacy(vf.Interface().(Message))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
|
||||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
|
||||||
panic("expected XXX_unrecognized to be of type []byte")
|
|
||||||
}
|
|
||||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// For proto2 messages, only discard unknown fields in message extensions
|
|
||||||
// that have been accessed via GetExtension.
|
|
||||||
if em, err := extendable(m); err == nil {
|
|
||||||
// Ignore lock since discardLegacy is not concurrency safe.
|
|
||||||
emm, _ := em.extensionsRead()
|
|
||||||
for _, mx := range emm {
|
|
||||||
if m, ok := mx.value.(Message); ok {
|
|
||||||
discardLegacy(m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
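For context on the DiscardUnknown rewrite above, here is a runnable sketch of the round trip it is meant for: bytes carrying a field number the message does not define are preserved as unknown fields by Unmarshal and re-emitted by Marshal until DiscardUnknown clears them. The field-999 payload below is hand-crafted for illustration, and the wrappers.StringValue type stands in for any generated message.

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// A hand-crafted payload for field number 999 (varint wire type, value 1).
	// StringValue does not define field 999, so Unmarshal keeps it as an
	// unknown field rather than dropping it.
	raw := []byte{0xb8, 0x3e, 0x01}

	msg := &wrappers.StringValue{}
	if err := proto.Unmarshal(raw, msg); err != nil {
		panic(err)
	}

	before, _ := proto.Marshal(msg) // unknown field still re-emitted
	proto.DiscardUnknown(msg)
	after, _ := proto.Marshal(msg) // unknown field cleared

	fmt.Println(len(before), len(after)) // expected: 3 0
}
```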
203
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
@@ -1,203 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Routines for encoding data into the wire format for protocol buffers.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
|
||||||
// a struct with a repeated field containing a nil element.
|
|
||||||
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
|
|
||||||
|
|
||||||
// errOneofHasNil is the error returned if Marshal is called with
|
|
||||||
// a struct with a oneof field containing a nil element.
|
|
||||||
errOneofHasNil = errors.New("proto: oneof field has nil value")
|
|
||||||
|
|
||||||
// ErrNil is the error returned if Marshal is called with nil.
|
|
||||||
ErrNil = errors.New("proto: Marshal called with nil")
|
|
||||||
|
|
||||||
// ErrTooLarge is the error returned if Marshal is called with a
|
|
||||||
// message that encodes to >2GB.
|
|
||||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
|
||||||
)
|
|
||||||
|
|
||||||
// The fundamental encoders that put bytes on the wire.
|
|
||||||
// Those that take integer types all accept uint64 and are
|
|
||||||
// therefore of type valueEncoder.
|
|
||||||
|
|
||||||
const maxVarintBytes = 10 // maximum length of a varint
|
|
||||||
|
|
||||||
// EncodeVarint returns the varint encoding of x.
|
|
||||||
// This is the format for the
|
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
|
||||||
// protocol buffer types.
|
|
||||||
// Not used by the package itself, but helpful to clients
|
|
||||||
// wishing to use the same encoding.
|
|
||||||
func EncodeVarint(x uint64) []byte {
|
|
||||||
var buf [maxVarintBytes]byte
|
|
||||||
var n int
|
|
||||||
for n = 0; x > 127; n++ {
|
|
||||||
buf[n] = 0x80 | uint8(x&0x7F)
|
|
||||||
x >>= 7
|
|
||||||
}
|
|
||||||
buf[n] = uint8(x)
|
|
||||||
n++
|
|
||||||
return buf[0:n]
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeVarint writes a varint-encoded integer to the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
|
||||||
// protocol buffer types.
|
|
||||||
func (p *Buffer) EncodeVarint(x uint64) error {
|
|
||||||
for x >= 1<<7 {
|
|
||||||
p.buf = append(p.buf, uint8(x&0x7f|0x80))
|
|
||||||
x >>= 7
|
|
||||||
}
|
|
||||||
p.buf = append(p.buf, uint8(x))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SizeVarint returns the varint encoding size of an integer.
|
|
||||||
func SizeVarint(x uint64) int {
|
|
||||||
switch {
|
|
||||||
case x < 1<<7:
|
|
||||||
return 1
|
|
||||||
case x < 1<<14:
|
|
||||||
return 2
|
|
||||||
case x < 1<<21:
|
|
||||||
return 3
|
|
||||||
case x < 1<<28:
|
|
||||||
return 4
|
|
||||||
case x < 1<<35:
|
|
||||||
return 5
|
|
||||||
case x < 1<<42:
|
|
||||||
return 6
|
|
||||||
case x < 1<<49:
|
|
||||||
return 7
|
|
||||||
case x < 1<<56:
|
|
||||||
return 8
|
|
||||||
case x < 1<<63:
|
|
||||||
return 9
|
|
||||||
}
|
|
||||||
return 10
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeFixed64 writes a 64-bit integer to the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// fixed64, sfixed64, and double protocol buffer types.
|
|
||||||
func (p *Buffer) EncodeFixed64(x uint64) error {
|
|
||||||
p.buf = append(p.buf,
|
|
||||||
uint8(x),
|
|
||||||
uint8(x>>8),
|
|
||||||
uint8(x>>16),
|
|
||||||
uint8(x>>24),
|
|
||||||
uint8(x>>32),
|
|
||||||
uint8(x>>40),
|
|
||||||
uint8(x>>48),
|
|
||||||
uint8(x>>56))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeFixed32 writes a 32-bit integer to the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// fixed32, sfixed32, and float protocol buffer types.
|
|
||||||
func (p *Buffer) EncodeFixed32(x uint64) error {
|
|
||||||
p.buf = append(p.buf,
|
|
||||||
uint8(x),
|
|
||||||
uint8(x>>8),
|
|
||||||
uint8(x>>16),
|
|
||||||
uint8(x>>24))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
|
|
||||||
// to the Buffer.
|
|
||||||
// This is the format used for the sint64 protocol buffer type.
|
|
||||||
func (p *Buffer) EncodeZigzag64(x uint64) error {
|
|
||||||
// use signed number to get arithmetic right shift.
|
|
||||||
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
|
|
||||||
// to the Buffer.
|
|
||||||
// This is the format used for the sint32 protocol buffer type.
|
|
||||||
func (p *Buffer) EncodeZigzag32(x uint64) error {
|
|
||||||
// use signed number to get arithmetic right shift.
|
|
||||||
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
|
|
||||||
// This is the format used for the bytes protocol buffer
|
|
||||||
// type and for embedded messages.
|
|
||||||
func (p *Buffer) EncodeRawBytes(b []byte) error {
|
|
||||||
p.EncodeVarint(uint64(len(b)))
|
|
||||||
p.buf = append(p.buf, b...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeStringBytes writes an encoded string to the Buffer.
|
|
||||||
// This is the format used for the proto2 string type.
|
|
||||||
func (p *Buffer) EncodeStringBytes(s string) error {
|
|
||||||
p.EncodeVarint(uint64(len(s)))
|
|
||||||
p.buf = append(p.buf, s...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshaler is the interface representing objects that can marshal themselves.
|
|
||||||
type Marshaler interface {
|
|
||||||
Marshal() ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeMessage writes the protocol buffer to the Buffer,
|
|
||||||
// prefixed by a varint-encoded length.
|
|
||||||
func (p *Buffer) EncodeMessage(pb Message) error {
|
|
||||||
siz := Size(pb)
|
|
||||||
p.EncodeVarint(uint64(siz))
|
|
||||||
return p.Marshal(pb)
|
|
||||||
}
|
|
||||||
|
|
||||||
// All protocol buffer fields are nillable, but be careful.
|
|
||||||
func isNil(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
|
||||||
return v.IsNil()
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
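The deleted encode.go above carried the standalone varint helpers (EncodeVarint, SizeVarint, Buffer.EncodeVarint). As a reference for the wire layout they produced, a self-contained re-implementation of the same encoding (a sketch, not the removed code itself):

```
package main

import "fmt"

// encodeVarint mirrors the layout the removed helpers produced: 7 bits of the
// value per byte, least-significant group first, continuation bit 0x80 set on
// every byte except the last.
func encodeVarint(x uint64) []byte {
	var buf []byte
	for x >= 1<<7 {
		buf = append(buf, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(buf, byte(x))
}

func main() {
	fmt.Printf("%x\n", encodeVarint(1))   // 01
	fmt.Printf("%x\n", encodeVarint(300)) // ac02
}
```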
301
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
@@ -1,301 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Protocol buffer comparison.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
Equal returns true iff protocol buffers a and b are equal.
|
|
||||||
The arguments must both be pointers to protocol buffer structs.
|
|
||||||
|
|
||||||
Equality is defined in this way:
|
|
||||||
- Two messages are equal iff they are the same type,
|
|
||||||
corresponding fields are equal, unknown field sets
|
|
||||||
are equal, and extensions sets are equal.
|
|
||||||
- Two set scalar fields are equal iff their values are equal.
|
|
||||||
If the fields are of a floating-point type, remember that
|
|
||||||
NaN != x for all x, including NaN. If the message is defined
|
|
||||||
in a proto3 .proto file, fields are not "set"; specifically,
|
|
||||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
|
||||||
- Two repeated fields are equal iff their lengths are the same,
|
|
||||||
and their corresponding elements are equal. Note a "bytes" field,
|
|
||||||
although represented by []byte, is not a repeated field and the
|
|
||||||
rule for the scalar fields described above applies.
|
|
||||||
- Two unset fields are equal.
|
|
||||||
- Two unknown field sets are equal if their current
|
|
||||||
encoded state is equal.
|
|
||||||
- Two extension sets are equal iff they have corresponding
|
|
||||||
elements that are pairwise equal.
|
|
||||||
- Two map fields are equal iff their lengths are the same,
|
|
||||||
and they contain the same set of elements. Zero-length map
|
|
||||||
fields are equal.
|
|
||||||
- Every other combination of things are not equal.
|
|
||||||
|
|
||||||
The return value is undefined if a and b are not protocol buffers.
|
|
||||||
*/
|
|
||||||
func Equal(a, b Message) bool {
|
|
||||||
if a == nil || b == nil {
|
|
||||||
return a == b
|
|
||||||
}
|
|
||||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
|
||||||
if v1.Type() != v2.Type() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if v1.Kind() == reflect.Ptr {
|
|
||||||
if v1.IsNil() {
|
|
||||||
return v2.IsNil()
|
|
||||||
}
|
|
||||||
if v2.IsNil() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
v1, v2 = v1.Elem(), v2.Elem()
|
|
||||||
}
|
|
||||||
if v1.Kind() != reflect.Struct {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return equalStruct(v1, v2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// v1 and v2 are known to have the same type.
|
|
||||||
func equalStruct(v1, v2 reflect.Value) bool {
|
|
||||||
sprop := GetProperties(v1.Type())
|
|
||||||
for i := 0; i < v1.NumField(); i++ {
|
|
||||||
f := v1.Type().Field(i)
|
|
||||||
if strings.HasPrefix(f.Name, "XXX_") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
f1, f2 := v1.Field(i), v2.Field(i)
|
|
||||||
if f.Type.Kind() == reflect.Ptr {
|
|
||||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
|
||||||
// both unset
|
|
||||||
continue
|
|
||||||
} else if n1 != n2 {
|
|
||||||
// set/unset mismatch
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
f1, f2 = f1.Elem(), f2.Elem()
|
|
||||||
}
|
|
||||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
|
||||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
|
||||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
|
||||||
em2 := v2.FieldByName("XXX_extensions")
|
|
||||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
uf := v1.FieldByName("XXX_unrecognized")
|
|
||||||
if !uf.IsValid() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
u1 := uf.Bytes()
|
|
||||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
|
||||||
return bytes.Equal(u1, u2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// v1 and v2 are known to have the same type.
|
|
||||||
// prop may be nil.
|
|
||||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
|
||||||
if v1.Type() == protoMessageType {
|
|
||||||
m1, _ := v1.Interface().(Message)
|
|
||||||
m2, _ := v2.Interface().(Message)
|
|
||||||
return Equal(m1, m2)
|
|
||||||
}
|
|
||||||
switch v1.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return v1.Bool() == v2.Bool()
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return v1.Float() == v2.Float()
|
|
||||||
case reflect.Int32, reflect.Int64:
|
|
||||||
return v1.Int() == v2.Int()
|
|
||||||
case reflect.Interface:
|
|
||||||
// Probably a oneof field; compare the inner values.
|
|
||||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
|
||||||
if n1 || n2 {
|
|
||||||
return n1 == n2
|
|
||||||
}
|
|
||||||
e1, e2 := v1.Elem(), v2.Elem()
|
|
||||||
if e1.Type() != e2.Type() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return equalAny(e1, e2, nil)
|
|
||||||
case reflect.Map:
|
|
||||||
if v1.Len() != v2.Len() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, key := range v1.MapKeys() {
|
|
||||||
val2 := v2.MapIndex(key)
|
|
||||||
if !val2.IsValid() {
|
|
||||||
// This key was not found in the second map.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
case reflect.Ptr:
|
|
||||||
// Maps may have nil values in them, so check for nil.
|
|
||||||
if v1.IsNil() && v2.IsNil() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if v1.IsNil() != v2.IsNil() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
|
||||||
case reflect.Slice:
|
|
||||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
|
||||||
// short circuit: []byte
|
|
||||||
|
|
||||||
// Edge case: if this is in a proto3 message, a zero length
|
|
||||||
// bytes field is considered the zero value.
|
|
||||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if v1.IsNil() != v2.IsNil() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
|
||||||
}
|
|
||||||
|
|
||||||
if v1.Len() != v2.Len() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i := 0; i < v1.Len(); i++ {
|
|
||||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
case reflect.String:
|
|
||||||
return v1.Interface().(string) == v2.Interface().(string)
|
|
||||||
case reflect.Struct:
|
|
||||||
return equalStruct(v1, v2)
|
|
||||||
case reflect.Uint32, reflect.Uint64:
|
|
||||||
return v1.Uint() == v2.Uint()
|
|
||||||
}
|
|
||||||
|
|
||||||
// unknown type, so not a protocol buffer
|
|
||||||
log.Printf("proto: don't know how to compare %v", v1)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// base is the struct type that the extensions are based on.
|
|
||||||
// x1 and x2 are InternalExtensions.
|
|
||||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
|
||||||
em1, _ := x1.extensionsRead()
|
|
||||||
em2, _ := x2.extensionsRead()
|
|
||||||
return equalExtMap(base, em1, em2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
|
||||||
if len(em1) != len(em2) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for extNum, e1 := range em1 {
|
|
||||||
e2, ok := em2[extNum]
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
m1 := extensionAsLegacyType(e1.value)
|
|
||||||
m2 := extensionAsLegacyType(e2.value)
|
|
||||||
|
|
||||||
if m1 == nil && m2 == nil {
|
|
||||||
// Both have only encoded form.
|
|
||||||
if bytes.Equal(e1.enc, e2.enc) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// The bytes are different, but the extensions might still be
|
|
||||||
// equal. We need to decode them to compare.
|
|
||||||
}
|
|
||||||
|
|
||||||
if m1 != nil && m2 != nil {
|
|
||||||
// Both are unencoded.
|
|
||||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// At least one is encoded. To do a semantically correct comparison
|
|
||||||
// we need to unmarshal them first.
|
|
||||||
var desc *ExtensionDesc
|
|
||||||
if m := extensionMaps[base]; m != nil {
|
|
||||||
desc = m[extNum]
|
|
||||||
}
|
|
||||||
if desc == nil {
|
|
||||||
// If both have only encoded form and the bytes are the same,
|
|
||||||
// it is handled above. We get here when the bytes are different.
|
|
||||||
// We don't know how to decode it, so just compare them as byte
|
|
||||||
// slices.
|
|
||||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
if m1 == nil {
|
|
||||||
m1, err = decodeExtension(e1.enc, desc)
|
|
||||||
}
|
|
||||||
if m2 == nil && err == nil {
|
|
||||||
m2, err = decodeExtension(e2.enc, desc)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
// The encoded form is invalid.
|
|
||||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
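The deleted equal.go above documented the semantics of proto.Equal, which this package continues to export after the rewrite (now backed by the v2 runtime). A brief, hedged illustration using the wrappers.StringValue message so that no project-specific generated code is needed:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	a := &wrappers.StringValue{Value: "hello"}
	b := &wrappers.StringValue{Value: "hello"}
	c := &wrappers.StringValue{Value: "world"}

	fmt.Println(proto.Equal(a, b)) // true: same type, equal fields
	fmt.Println(proto.Equal(a, c)) // false: field values differ
}
```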
783
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
@@ -1,607 +1,356 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
// https://github.com/golang/protobuf
|
// Use of this source code is governed by a BSD-style
|
||||||
//
|
// license that can be found in the LICENSE file.
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
package proto
|
||||||
|
|
||||||
/*
|
|
||||||
* Types and routines for supporting protocol buffer extensions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
|
||||||
"sync"
|
"google.golang.org/protobuf/encoding/protowire"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
"google.golang.org/protobuf/runtime/protoiface"
|
||||||
|
"google.golang.org/protobuf/runtime/protoimpl"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
type (
|
||||||
|
// ExtensionDesc represents an extension descriptor and
|
||||||
|
// is used to interact with an extension field in a message.
|
||||||
|
//
|
||||||
|
// Variables of this type are generated in code by protoc-gen-go.
|
||||||
|
ExtensionDesc = protoimpl.ExtensionInfo
|
||||||
|
|
||||||
|
// ExtensionRange represents a range of message extensions.
|
||||||
|
// Used in code generated by protoc-gen-go.
|
||||||
|
ExtensionRange = protoiface.ExtensionRangeV1
|
||||||
|
|
||||||
|
// Deprecated: Do not use; this is an internal type.
|
||||||
|
Extension = protoimpl.ExtensionFieldV1
|
||||||
|
|
||||||
|
// Deprecated: Do not use; this is an internal type.
|
||||||
|
XXX_InternalExtensions = protoimpl.ExtensionFields
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrMissingExtension reports whether the extension was not present.
|
||||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||||
|
|
||||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
|
||||||
// Used in code generated by the protocol compiler.
|
|
||||||
type ExtensionRange struct {
|
|
||||||
Start, End int32 // both inclusive
|
|
||||||
}
|
|
||||||
|
|
||||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
|
||||||
// proto compiler that may be extended.
|
|
||||||
type extendableProto interface {
|
|
||||||
Message
|
|
||||||
ExtensionRangeArray() []ExtensionRange
|
|
||||||
extensionsWrite() map[int32]Extension
|
|
||||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
|
||||||
}
|
|
||||||
|
|
||||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
|
||||||
// version of the proto compiler that may be extended.
|
|
||||||
type extendableProtoV1 interface {
|
|
||||||
Message
|
|
||||||
ExtensionRangeArray() []ExtensionRange
|
|
||||||
ExtensionMap() map[int32]Extension
|
|
||||||
}
|
|
||||||
|
|
||||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
|
||||||
type extensionAdapter struct {
|
|
||||||
extendableProtoV1
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
|
||||||
return e.ExtensionMap()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
|
||||||
return e.ExtensionMap(), notLocker{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
|
||||||
type notLocker struct{}
|
|
||||||
|
|
||||||
func (n notLocker) Lock() {}
|
|
||||||
func (n notLocker) Unlock() {}
|
|
||||||
|
|
||||||
// extendable returns the extendableProto interface for the given generated proto message.
|
|
||||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
|
||||||
// the extendableProto interface.
|
|
||||||
func extendable(p interface{}) (extendableProto, error) {
|
|
||||||
switch p := p.(type) {
|
|
||||||
case extendableProto:
|
|
||||||
if isNilPtr(p) {
|
|
||||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
|
||||||
}
|
|
||||||
return p, nil
|
|
||||||
case extendableProtoV1:
|
|
||||||
if isNilPtr(p) {
|
|
||||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
|
||||||
}
|
|
||||||
return extensionAdapter{p}, nil
|
|
||||||
}
|
|
||||||
// Don't allocate a specific error containing %T:
|
|
||||||
// this is the hot path for Clone and MarshalText.
|
|
||||||
return nil, errNotExtendable
|
|
||||||
}
|
|
||||||
|
|
||||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||||
|
|
||||||
func isNilPtr(x interface{}) bool {
|
// HasExtension reports whether the extension field is present in m
|
||||||
v := reflect.ValueOf(x)
|
// either as an explicitly populated field or as an unknown field.
|
||||||
return v.Kind() == reflect.Ptr && v.IsNil()
|
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
|
||||||
}
|
mr := MessageReflect(m)
|
||||||
|
if mr == nil || !mr.IsValid() {
|
||||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
return false
|
||||||
//
|
|
||||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
|
||||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
|
||||||
//
|
|
||||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
|
||||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
|
||||||
type XXX_InternalExtensions struct {
|
|
||||||
// The struct must be indirect so that if a user inadvertently copies a
|
|
||||||
// generated message and its embedded XXX_InternalExtensions, they
|
|
||||||
// avoid the mayhem of a copied mutex.
|
|
||||||
//
|
|
||||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
|
||||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
|
||||||
// mutually exclusive with other accesses.
|
|
||||||
p *struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
extensionMap map[int32]Extension
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// extensionsWrite returns the extension map, creating it on first use.
|
// Check whether any populated known field matches the field number.
|
||||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
xtd := xt.TypeDescriptor()
|
||||||
if e.p == nil {
|
if isValidExtension(mr.Descriptor(), xtd) {
|
||||||
e.p = new(struct {
|
has = mr.Has(xtd)
|
||||||
mu sync.Mutex
|
} else {
|
||||||
extensionMap map[int32]Extension
|
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||||
|
has = int32(fd.Number()) == xt.Field
|
||||||
|
return !has
|
||||||
})
|
})
|
||||||
e.p.extensionMap = make(map[int32]Extension)
|
|
||||||
}
|
}
|
||||||
return e.p.extensionMap
|
|
||||||
}
|
|
||||||
|
|
||||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
// Check whether any unknown field matches the field number.
|
||||||
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
for b := mr.GetUnknown(); !has && len(b) > 0; {
|
||||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
num, _, n := protowire.ConsumeField(b)
|
||||||
if e.p == nil {
|
has = int32(num) == xt.Field
|
||||||
return nil, nil
|
b = b[n:]
|
||||||
}
|
}
|
||||||
return e.p.extensionMap, &e.p.mu
|
return has
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExtensionDesc represents an extension specification.
|
// ClearExtension removes the extension field from m
|
||||||
// Used in generated code from the protocol compiler.
|
// either as an explicitly populated field or as an unknown field.
|
||||||
type ExtensionDesc struct {
|
func ClearExtension(m Message, xt *ExtensionDesc) {
|
||||||
ExtendedType Message // nil pointer to the type that is being extended
|
mr := MessageReflect(m)
|
||||||
ExtensionType interface{} // nil pointer to the extension type
|
if mr == nil || !mr.IsValid() {
|
||||||
Field int32 // field number
|
|
||||||
Name string // fully-qualified name of extension, for text formatting
|
|
||||||
Tag string // protobuf tag style
|
|
||||||
Filename string // name of the file in which the extension is defined
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ed *ExtensionDesc) repeated() bool {
|
|
||||||
t := reflect.TypeOf(ed.ExtensionType)
|
|
||||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extension represents an extension in a message.
|
|
||||||
type Extension struct {
|
|
||||||
// When an extension is stored in a message using SetExtension
|
|
||||||
// only desc and value are set. When the message is marshaled
|
|
||||||
// enc will be set to the encoded form of the message.
|
|
||||||
//
|
|
||||||
// When a message is unmarshaled and contains extensions, each
|
|
||||||
// extension will have only enc set. When such an extension is
|
|
||||||
// accessed using GetExtension (or GetExtensions) desc and value
|
|
||||||
// will be set.
|
|
||||||
desc *ExtensionDesc
|
|
||||||
|
|
||||||
// value is a concrete value for the extension field. Let the type of
|
|
||||||
// desc.ExtensionType be the "API type" and the type of Extension.value
|
|
||||||
// be the "storage type". The API type and storage type are the same except:
|
|
||||||
// * For scalars (except []byte), the API type uses *T,
|
|
||||||
// while the storage type uses T.
|
|
||||||
// * For repeated fields, the API type uses []T, while the storage type
|
|
||||||
// uses *[]T.
|
|
||||||
//
|
|
||||||
// The reason for the divergence is so that the storage type more naturally
|
|
||||||
// matches what is expected of when retrieving the values through the
|
|
||||||
// protobuf reflection APIs.
|
|
||||||
//
|
|
||||||
// The value may only be populated if desc is also populated.
|
|
||||||
value interface{}
|
|
||||||
|
|
||||||
// enc is the raw bytes for the extension field.
|
|
||||||
enc []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetRawExtension is for testing only.
|
|
||||||
func SetRawExtension(base Message, id int32, b []byte) {
|
|
||||||
epb, err := extendable(base)
|
|
||||||
if err != nil {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
extmap := epb.extensionsWrite()
|
|
||||||
extmap[id] = Extension{enc: b}
|
|
||||||
}
|
|
||||||
|
|
||||||
// isExtensionField returns true iff the given field number is in an extension range.
|
xtd := xt.TypeDescriptor()
|
||||||
func isExtensionField(pb extendableProto, field int32) bool {
|
if isValidExtension(mr.Descriptor(), xtd) {
|
||||||
for _, er := range pb.ExtensionRangeArray() {
|
mr.Clear(xtd)
|
||||||
if er.Start <= field && field <= er.End {
|
} else {
|
||||||
|
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||||
|
if int32(fd.Number()) == xt.Field {
|
||||||
|
mr.Clear(fd)
|
||||||
|
return false
|
||||||
|
}
|
||||||
return true
|
return true
|
		})
	}
	clearUnknown(mr, fieldNum(xt.Field))
}

// ClearAllExtensions clears all extensions from m.
// This includes populated fields and unknown fields in the extension range.
func ClearAllExtensions(m Message) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
		if fd.IsExtension() {
			mr.Clear(fd)
		}
		return true
	})
	clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}

// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes for the extension field.
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return nil, errNotExtendable
	}

	// Retrieve the unknown fields for this extension field.
	var bo protoreflect.RawFields
	for bi := mr.GetUnknown(); len(bi) > 0; {
		num, _, n := protowire.ConsumeField(bi)
		if int32(num) == xt.Field {
			bo = append(bo, bi[:n]...)
		}
		bi = bi[n:]
	}

	// For type incomplete descriptors, only retrieve the unknown fields.
	if xt.ExtensionType == nil {
		return []byte(bo), nil
	}

	// If the extension field only exists as unknown fields, unmarshal it.
	// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
	xtd := xt.TypeDescriptor()
	if !isValidExtension(mr.Descriptor(), xtd) {
		return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
	}
	if !mr.Has(xtd) && len(bo) > 0 {
		m2 := mr.New()
		if err := (proto.UnmarshalOptions{
			Resolver: extensionResolver{xt},
		}.Unmarshal(bo, m2.Interface())); err != nil {
			return nil, err
		}
		if m2.Has(xtd) {
			mr.Set(xtd, m2.Get(xtd))
			clearUnknown(mr, fieldNum(xt.Field))
		}
	}

	// Check whether the message has the extension field set or a default.
	var pv protoreflect.Value
	switch {
	case mr.Has(xtd):
		pv = mr.Get(xtd)
	case xtd.HasDefault():
		pv = xtd.Default()
	default:
		return nil, ErrMissingExtension
	}

	v := xt.InterfaceOf(pv)
	rv := reflect.ValueOf(v)
	if isScalarKind(rv.Kind()) {
		rv2 := reflect.New(rv.Type())
		rv2.Elem().Set(rv)
		v = rv2.Interface()
	}
	return v, nil
}

// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }

func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
		return r.xt, nil
	}
	return protoregistry.GlobalTypes.FindExtensionByName(field)
}

func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
		return r.xt, nil
	}
	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}

// GetExtensions returns a list of the extensions values present in m,
// corresponding with the provided list of extension descriptors, xts.
// If an extension is missing in m, the corresponding value is nil.
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return nil, errNotExtendable
	}

	vs := make([]interface{}, len(xts))
	for i, xt := range xts {
		v, err := GetExtension(m, xt)
		if err != nil {
			if err == ErrMissingExtension {
				continue
			}
			return vs, err
		}
		vs[i] = v
	}
	return vs, nil
}

// SetExtension sets an extension field in m to the provided value.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return errNotExtendable
	}

	rv := reflect.ValueOf(v)
	if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
	}
	if rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
		}
		if isScalarKind(rv.Elem().Kind()) {
			v = rv.Elem().Interface()
		}
	}

	xtd := xt.TypeDescriptor()
	if !isValidExtension(mr.Descriptor(), xtd) {
		return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
	}
	mr.Set(xtd, xt.ValueOf(v))
	clearUnknown(mr, fieldNum(xt.Field))
	return nil
}

// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	// Verify that the raw field is valid.
	for b0 := b; len(b0) > 0; {
		num, _, n := protowire.ConsumeField(b0)
		if int32(num) != fnum {
			panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
		}
		b0 = b0[n:]
	}

	ClearExtension(m, &ExtensionDesc{Field: fnum})
	mr.SetUnknown(append(mr.GetUnknown(), b...))
}

// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the later case, an type incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return nil, errNotExtendable
	}

	// Collect a set of known extension descriptors.
	extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
	mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		if fd.IsExtension() {
			xt := fd.(protoreflect.ExtensionTypeDescriptor)
			if xd, ok := xt.Type().(*ExtensionDesc); ok {
				extDescs[fd.Number()] = xd
			}
		}
		return true
	})

	// Collect a set of unknown extension descriptors.
	extRanges := mr.Descriptor().ExtensionRanges()
	for b := mr.GetUnknown(); len(b) > 0; {
		num, _, n := protowire.ConsumeField(b)
		if extRanges.Has(num) && extDescs[num] == nil {
			extDescs[num] = nil
		}
		b = b[n:]
	}

	// Transpose the set of descriptors into a list.
	var xts []*ExtensionDesc
	for num, xt := range extDescs {
		if xt == nil {
			xt = &ExtensionDesc{Field: int32(num)}
		}
		xts = append(xts, xt)
	}
	return xts, nil
}

// isValidExtension reports whether xtd is a valid extension descriptor for md.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
	return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}

// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
// This function exists for historical reasons since the representation of
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
func isScalarKind(k reflect.Kind) bool {
	switch k {
	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
		return true
	default:
		return false
	}
}

// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
	Has(protoreflect.FieldNumber) bool
}) {
	var bo protoreflect.RawFields
	for bi := m.GetUnknown(); len(bi) > 0; {
		num, _, n := protowire.ConsumeField(bi)
		if !remover.Has(num) {
			bo = append(bo, bi[:n]...)
		}
		bi = bi[n:]
	}
	if bi := m.GetUnknown(); len(bi) != len(bo) {
		m.SetUnknown(bo)
	}
}

type fieldNum protoreflect.FieldNumber

func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
	return protoreflect.FieldNumber(n1) == n2
}
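A minimal usage sketch of the extension accessors above. The generated package path, the message type pb.MyMessage, and the string extension descriptor pb.E_MyExtension are hypothetical stand-ins for output of protoc-gen-go; only the calls into this proto package are taken from the code above.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical protoc-gen-go output
)

func main() {
	m := &pb.MyMessage{}

	// SetExtension validates the descriptor and value type, stores the value,
	// and clears any stale unknown bytes carrying the same field number.
	if err := proto.SetExtension(m, pb.E_MyExtension, proto.String("hello")); err != nil {
		log.Fatal(err)
	}

	// HasExtension looks at populated extension fields and at unknown fields
	// inside the message's extension ranges.
	fmt.Println(proto.HasExtension(m, pb.E_MyExtension))

	// GetExtension returns an interface{} holding the Go type named by the
	// descriptor (*string here), or ErrMissingExtension when unset with no default.
	v, err := proto.GetExtension(m, pb.E_MyExtension)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string))

	// ClearExtension drops both the populated field and any matching unknown bytes.
	proto.ClearExtension(m, pb.E_MyExtension)
}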
965
vendor/github.com/golang/protobuf/proto/lib.go
generated
vendored
@@ -1,965 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package proto converts data structures to and from the wire format of
|
|
||||||
protocol buffers. It works in concert with the Go source code generated
|
|
||||||
for .proto files by the protocol compiler.
|
|
||||||
|
|
||||||
A summary of the properties of the protocol buffer interface
|
|
||||||
for a protocol buffer variable v:
|
|
||||||
|
|
||||||
- Names are turned from camel_case to CamelCase for export.
|
|
||||||
- There are no methods on v to set fields; just treat
|
|
||||||
them as structure fields.
|
|
||||||
- There are getters that return a field's value if set,
|
|
||||||
and return the field's default value if unset.
|
|
||||||
The getters work even if the receiver is a nil message.
|
|
||||||
- The zero value for a struct is its correct initialization state.
|
|
||||||
All desired fields must be set before marshaling.
|
|
||||||
- A Reset() method will restore a protobuf struct to its zero state.
|
|
||||||
- Non-repeated fields are pointers to the values; nil means unset.
|
|
||||||
That is, optional or required field int32 f becomes F *int32.
|
|
||||||
- Repeated fields are slices.
|
|
||||||
- Helper functions are available to aid the setting of fields.
|
|
||||||
msg.Foo = proto.String("hello") // set field
|
|
||||||
- Constants are defined to hold the default values of all fields that
|
|
||||||
have them. They have the form Default_StructName_FieldName.
|
|
||||||
Because the getter methods handle defaulted values,
|
|
||||||
direct use of these constants should be rare.
|
|
||||||
- Enums are given type names and maps from names to values.
|
|
||||||
Enum values are prefixed by the enclosing message's name, or by the
|
|
||||||
enum's type name if it is a top-level enum. Enum types have a String
|
|
||||||
method, and a Enum method to assist in message construction.
|
|
||||||
- Nested messages, groups and enums have type names prefixed with the name of
|
|
||||||
the surrounding message type.
|
|
||||||
- Extensions are given descriptor names that start with E_,
|
|
||||||
followed by an underscore-delimited list of the nested messages
|
|
||||||
that contain it (if any) followed by the CamelCased name of the
|
|
||||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
|
||||||
and SetExtension are functions for manipulating extensions.
|
|
||||||
- Oneof field sets are given a single field in their message,
|
|
||||||
with distinguished wrapper types for each possible field value.
|
|
||||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
|
||||||
|
|
||||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
|
||||||
|
|
||||||
- Non-repeated fields of non-message type are values instead of pointers.
|
|
||||||
- Enum types do not get an Enum method.
|
|
||||||
|
|
||||||
The simplest way to describe this is to see an example.
|
|
||||||
Given file test.proto, containing
|
|
||||||
|
|
||||||
package example;
|
|
||||||
|
|
||||||
enum FOO { X = 17; }
|
|
||||||
|
|
||||||
message Test {
|
|
||||||
required string label = 1;
|
|
||||||
optional int32 type = 2 [default=77];
|
|
||||||
repeated int64 reps = 3;
|
|
||||||
optional group OptionalGroup = 4 {
|
|
||||||
required string RequiredField = 5;
|
|
||||||
}
|
|
||||||
oneof union {
|
|
||||||
int32 number = 6;
|
|
||||||
string name = 7;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
The resulting file, test.pb.go, is:
|
|
||||||
|
|
||||||
package example
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
|
||||||
import math "math"
|
|
||||||
|
|
||||||
type FOO int32
|
|
||||||
const (
|
|
||||||
FOO_X FOO = 17
|
|
||||||
)
|
|
||||||
var FOO_name = map[int32]string{
|
|
||||||
17: "X",
|
|
||||||
}
|
|
||||||
var FOO_value = map[string]int32{
|
|
||||||
"X": 17,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x FOO) Enum() *FOO {
|
|
||||||
p := new(FOO)
|
|
||||||
*p = x
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
func (x FOO) String() string {
|
|
||||||
return proto.EnumName(FOO_name, int32(x))
|
|
||||||
}
|
|
||||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
|
||||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*x = FOO(value)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Test struct {
|
|
||||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
|
||||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
|
||||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
|
||||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
|
||||||
// Types that are valid to be assigned to Union:
|
|
||||||
// *Test_Number
|
|
||||||
// *Test_Name
|
|
||||||
Union isTest_Union `protobuf_oneof:"union"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
func (m *Test) Reset() { *m = Test{} }
|
|
||||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Test) ProtoMessage() {}
|
|
||||||
|
|
||||||
type isTest_Union interface {
|
|
||||||
isTest_Union()
|
|
||||||
}
|
|
||||||
|
|
||||||
type Test_Number struct {
|
|
||||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
|
||||||
}
|
|
||||||
type Test_Name struct {
|
|
||||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Test_Number) isTest_Union() {}
|
|
||||||
func (*Test_Name) isTest_Union() {}
|
|
||||||
|
|
||||||
func (m *Test) GetUnion() isTest_Union {
|
|
||||||
if m != nil {
|
|
||||||
return m.Union
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
const Default_Test_Type int32 = 77
|
|
||||||
|
|
||||||
func (m *Test) GetLabel() string {
|
|
||||||
if m != nil && m.Label != nil {
|
|
||||||
return *m.Label
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Test) GetType() int32 {
|
|
||||||
if m != nil && m.Type != nil {
|
|
||||||
return *m.Type
|
|
||||||
}
|
|
||||||
return Default_Test_Type
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
|
||||||
if m != nil {
|
|
||||||
return m.Optionalgroup
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Test_OptionalGroup struct {
|
|
||||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
|
||||||
}
|
|
||||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
|
||||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
|
||||||
|
|
||||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
|
||||||
if m != nil && m.RequiredField != nil {
|
|
||||||
return *m.RequiredField
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Test) GetNumber() int32 {
|
|
||||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
|
||||||
return x.Number
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Test) GetName() string {
|
|
||||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
|
||||||
return x.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
|
||||||
}
|
|
||||||
|
|
||||||
To create and play with a Test object:
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
pb "./example.pb"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
test := &pb.Test{
|
|
||||||
Label: proto.String("hello"),
|
|
||||||
Type: proto.Int32(17),
|
|
||||||
Reps: []int64{1, 2, 3},
|
|
||||||
Optionalgroup: &pb.Test_OptionalGroup{
|
|
||||||
RequiredField: proto.String("good bye"),
|
|
||||||
},
|
|
||||||
Union: &pb.Test_Name{"fred"},
|
|
||||||
}
|
|
||||||
data, err := proto.Marshal(test)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("marshaling error: ", err)
|
|
||||||
}
|
|
||||||
newTest := &pb.Test{}
|
|
||||||
err = proto.Unmarshal(data, newTest)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("unmarshaling error: ", err)
|
|
||||||
}
|
|
||||||
// Now test and newTest contain the same data.
|
|
||||||
if test.GetLabel() != newTest.GetLabel() {
|
|
||||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
|
||||||
}
|
|
||||||
// Use a type switch to determine which oneof was set.
|
|
||||||
switch u := test.Union.(type) {
|
|
||||||
case *pb.Test_Number: // u.Number contains the number.
|
|
||||||
case *pb.Test_Name: // u.Name contains the string.
|
|
||||||
}
|
|
||||||
// etc.
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
|
||||||
// Marshal reports this when a required field is not initialized.
|
|
||||||
// Unmarshal reports this when a required field is missing from the wire data.
|
|
||||||
type RequiredNotSetError struct{ field string }
|
|
||||||
|
|
||||||
func (e *RequiredNotSetError) Error() string {
|
|
||||||
if e.field == "" {
|
|
||||||
return fmt.Sprintf("proto: required field not set")
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
|
||||||
}
|
|
||||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
|
||||||
return true
|
|
||||||
}
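A minimal sketch of treating RequiredNotSetError as non-fatal when marshaling, assuming the usual imports of "log" and this proto package; msg can be any proto2 message that might leave required fields unset.

func marshalLenient(msg proto.Message) ([]byte, error) {
	data, err := proto.Marshal(msg)
	if err != nil {
		// RequiredNotSetError exposes RequiredNotSet(); other errors stay fatal.
		if rns, ok := err.(interface{ RequiredNotSet() bool }); ok && rns.RequiredNotSet() {
			log.Printf("proto: %v", err) // data may still hold a partial encoding
			return data, nil
		}
		return nil, err
	}
	return data, nil
}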
|
|
||||||
|
|
||||||
type invalidUTF8Error struct{ field string }
|
|
||||||
|
|
||||||
func (e *invalidUTF8Error) Error() string {
|
|
||||||
if e.field == "" {
|
|
||||||
return "proto: invalid UTF-8 detected"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
|
||||||
}
|
|
||||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
|
||||||
// This error should not be exposed to the external API as such errors should
|
|
||||||
// be recreated with the field information.
|
|
||||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
|
||||||
|
|
||||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
|
||||||
// or a InvalidUTF8 error.
|
|
||||||
func isNonFatal(err error) bool {
|
|
||||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
type nonFatal struct{ E error }
|
|
||||||
|
|
||||||
// Merge merges err into nf and reports whether it was successful.
|
|
||||||
// Otherwise it returns false for any fatal non-nil errors.
|
|
||||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
|
||||||
if err == nil {
|
|
||||||
return true // not an error
|
|
||||||
}
|
|
||||||
if !isNonFatal(err) {
|
|
||||||
return false // fatal error
|
|
||||||
}
|
|
||||||
if nf.E == nil {
|
|
||||||
nf.E = err // store first instance of non-fatal error
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message is implemented by generated protocol buffer messages.
|
|
||||||
type Message interface {
|
|
||||||
Reset()
|
|
||||||
String() string
|
|
||||||
ProtoMessage()
|
|
||||||
}
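A sketch of the three methods protoc-gen-go emits so that a generated struct satisfies the Message interface, assuming this proto package is imported; Frame and its field are hypothetical, and the pattern mirrors the Test example in the package documentation above.

type Frame struct {
	Payload []byte `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
}

func (m *Frame) Reset()         { *m = Frame{} }
func (m *Frame) String() string { return proto.CompactTextString(m) }
func (*Frame) ProtoMessage()    {}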
|
|
||||||
|
|
||||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
|
||||||
// protocol buffers. It may be reused between invocations to
|
|
||||||
// reduce memory usage. It is not necessary to use a Buffer;
|
|
||||||
// the global functions Marshal and Unmarshal create a
|
|
||||||
// temporary Buffer and are fine for most applications.
|
|
||||||
type Buffer struct {
|
|
||||||
buf []byte // encode/decode byte stream
|
|
||||||
index int // read point
|
|
||||||
|
|
||||||
deterministic bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
|
||||||
// the contents of the argument slice.
|
|
||||||
func NewBuffer(e []byte) *Buffer {
|
|
||||||
return &Buffer{buf: e}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
|
||||||
func (p *Buffer) Reset() {
|
|
||||||
p.buf = p.buf[0:0] // for reading/writing
|
|
||||||
p.index = 0 // for reading
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBuf replaces the internal buffer with the slice,
|
|
||||||
// ready for unmarshaling the contents of the slice.
|
|
||||||
func (p *Buffer) SetBuf(s []byte) {
|
|
||||||
p.buf = s
|
|
||||||
p.index = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bytes returns the contents of the Buffer.
|
|
||||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
|
||||||
|
|
||||||
// SetDeterministic sets whether to use deterministic serialization.
|
|
||||||
//
|
|
||||||
// Deterministic serialization guarantees that for a given binary, equal
|
|
||||||
// messages will always be serialized to the same bytes. This implies:
|
|
||||||
//
|
|
||||||
// - Repeated serialization of a message will return the same bytes.
|
|
||||||
// - Different processes of the same binary (which may be executing on
|
|
||||||
// different machines) will serialize equal messages to the same bytes.
|
|
||||||
//
|
|
||||||
// Note that the deterministic serialization is NOT canonical across
|
|
||||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
|
||||||
// across different builds with schema changes due to unknown fields.
|
|
||||||
// Users who need canonical serialization (e.g., persistent storage in a
|
|
||||||
// canonical form, fingerprinting, etc.) should define their own
|
|
||||||
// canonicalization specification and implement their own serializer rather
|
|
||||||
// than relying on this API.
|
|
||||||
//
|
|
||||||
// If deterministic serialization is requested, map entries will be sorted
|
|
||||||
// by keys in lexicographical order. This is an implementation detail and
|
|
||||||
// subject to change.
|
|
||||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
|
||||||
p.deterministic = deterministic
|
|
||||||
}
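A short sketch of deterministic serialization with a reusable Buffer, assuming this proto package is imported; msg is any generated message, typically one with map fields whose entry order would otherwise vary.

func marshalDeterministic(msg proto.Message) ([]byte, error) {
	var buf proto.Buffer
	buf.SetDeterministic(true) // map entries are emitted in sorted key order
	if err := buf.Marshal(msg); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}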
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Bool is a helper routine that allocates a new bool value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Bool(v bool) *bool {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int32 is a helper routine that allocates a new int32 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Int32(v int32) *int32 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int is a helper routine that allocates a new int32 value
|
|
||||||
// to store v and returns a pointer to it, but unlike Int32
|
|
||||||
// its argument value is an int.
|
|
||||||
func Int(v int) *int32 {
|
|
||||||
p := new(int32)
|
|
||||||
*p = int32(v)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64 is a helper routine that allocates a new int64 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Int64(v int64) *int64 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float32 is a helper routine that allocates a new float32 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Float32(v float32) *float32 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64 is a helper routine that allocates a new float64 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Float64(v float64) *float64 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint32 is a helper routine that allocates a new uint32 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Uint32(v uint32) *uint32 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint64 is a helper routine that allocates a new uint64 value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func Uint64(v uint64) *uint64 {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// String is a helper routine that allocates a new string value
|
|
||||||
// to store v and returns a pointer to it.
|
|
||||||
func String(v string) *string {
|
|
||||||
return &v
|
|
||||||
}
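A sketch of filling optional proto2 fields with the pointer helpers above; pb.Profile and its field names are hypothetical generated code, and only the helper calls come from this package.

func newProfile() *pb.Profile {
	return &pb.Profile{
		Name:   proto.String("gopher"), // optional string
		Age:    proto.Int32(12),        // optional int32
		Weight: proto.Float64(3.14),    // optional double
		Admin:  proto.Bool(false),      // explicitly false, distinct from unset (nil)
	}
}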
|
|
||||||
|
|
||||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
|
||||||
// by name. Given an enum map and a value, it returns a useful string.
|
|
||||||
func EnumName(m map[int32]string, v int32) string {
|
|
||||||
s, ok := m[v]
|
|
||||||
if ok {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return strconv.Itoa(int(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
|
||||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
|
||||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
|
||||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
|
||||||
//
|
|
||||||
// The function can deal with both JSON representations, numeric and symbolic.
|
|
||||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
|
||||||
if data[0] == '"' {
|
|
||||||
// New style: enums are strings.
|
|
||||||
var repr string
|
|
||||||
if err := json.Unmarshal(data, &repr); err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
val, ok := m[repr]
|
|
||||||
if !ok {
|
|
||||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
// Old style: enums are ints.
|
|
||||||
var val int32
|
|
||||||
if err := json.Unmarshal(data, &val); err != nil {
|
|
||||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
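A sketch of the UnmarshalJSON method that generated enum types wire up through UnmarshalJSONEnum; FOO and FOO_value are the hypothetical enum type and name-to-value map from the package documentation above, and this proto package is assumed imported.

func (x *FOO) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
	if err != nil {
		return err
	}
	*x = FOO(value)
	return nil
}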
|
|
||||||
|
|
||||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
|
||||||
// including the string s. Used in testing but made available for general debugging.
|
|
||||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
|
||||||
var u uint64
|
|
||||||
|
|
||||||
obuf := p.buf
|
|
||||||
index := p.index
|
|
||||||
p.buf = b
|
|
||||||
p.index = 0
|
|
||||||
depth := 0
|
|
||||||
|
|
||||||
fmt.Printf("\n--- %s ---\n", s)
|
|
||||||
|
|
||||||
out:
|
|
||||||
for {
|
|
||||||
for i := 0; i < depth; i++ {
|
|
||||||
fmt.Print(" ")
|
|
||||||
}
|
|
||||||
|
|
||||||
index := p.index
|
|
||||||
if index == len(p.buf) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
op, err := p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
tag := op >> 3
|
|
||||||
wire := op & 7
|
|
||||||
|
|
||||||
switch wire {
|
|
||||||
default:
|
|
||||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
|
||||||
index, tag, wire)
|
|
||||||
break out
|
|
||||||
|
|
||||||
case WireBytes:
|
|
||||||
var r []byte
|
|
||||||
|
|
||||||
r, err = p.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
|
||||||
if len(r) <= 6 {
|
|
||||||
for i := 0; i < len(r); i++ {
|
|
||||||
fmt.Printf(" %.2x", r[i])
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for i := 0; i < 3; i++ {
|
|
||||||
fmt.Printf(" %.2x", r[i])
|
|
||||||
}
|
|
||||||
fmt.Printf(" ..")
|
|
||||||
for i := len(r) - 3; i < len(r); i++ {
|
|
||||||
fmt.Printf(" %.2x", r[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Printf("\n")
|
|
||||||
|
|
||||||
case WireFixed32:
|
|
||||||
u, err = p.DecodeFixed32()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
|
||||||
|
|
||||||
case WireFixed64:
|
|
||||||
u, err = p.DecodeFixed64()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
|
||||||
|
|
||||||
case WireVarint:
|
|
||||||
u, err = p.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
|
||||||
|
|
||||||
case WireStartGroup:
|
|
||||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
|
||||||
depth++
|
|
||||||
|
|
||||||
case WireEndGroup:
|
|
||||||
depth--
|
|
||||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if depth != 0 {
|
|
||||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
|
||||||
}
|
|
||||||
fmt.Printf("\n")
|
|
||||||
|
|
||||||
p.buf = obuf
|
|
||||||
p.index = index
|
|
||||||
}
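A tiny sketch of dumping wire-format bytes with DebugPrint, assuming this proto package is imported; data is assumed to be a valid encoding of some message.

func dump(data []byte) {
	var buf proto.Buffer
	buf.DebugPrint("encoded message", data) // prints tag, wire type, and value per field
}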
|
|
||||||
|
|
||||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
|
||||||
// It only modifies fields that are both unset and have defined defaults.
|
|
||||||
// It recursively sets default values in any non-nil sub-messages.
|
|
||||||
func SetDefaults(pb Message) {
|
|
||||||
setDefaults(reflect.ValueOf(pb), true, false)
|
|
||||||
}
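A sketch of applying declared defaults after constructing a message by hand; pb.Test is hypothetical and mirrors the Test message in the package documentation above, whose type field declares [default=77].

func newTestWithDefaults() *pb.Test {
	t := &pb.Test{Label: proto.String("hi")}
	proto.SetDefaults(t) // unset scalar fields with declared defaults are populated, recursively
	return t             // t.Type now points at 77 instead of being nil
}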
|
|
||||||
|
|
||||||
// v is a pointer to a struct.
|
|
||||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
|
||||||
v = v.Elem()
|
|
||||||
|
|
||||||
defaultMu.RLock()
|
|
||||||
dm, ok := defaults[v.Type()]
|
|
||||||
defaultMu.RUnlock()
|
|
||||||
if !ok {
|
|
||||||
dm = buildDefaultMessage(v.Type())
|
|
||||||
defaultMu.Lock()
|
|
||||||
defaults[v.Type()] = dm
|
|
||||||
defaultMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, sf := range dm.scalars {
|
|
||||||
f := v.Field(sf.index)
|
|
||||||
if !f.IsNil() {
|
|
||||||
// field already set
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dv := sf.value
|
|
||||||
if dv == nil && !zeros {
|
|
||||||
// no explicit default, and don't want to set zeros
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fptr := f.Addr().Interface() // **T
|
|
||||||
// TODO: Consider batching the allocations we do here.
|
|
||||||
switch sf.kind {
|
|
||||||
case reflect.Bool:
|
|
||||||
b := new(bool)
|
|
||||||
if dv != nil {
|
|
||||||
*b = dv.(bool)
|
|
||||||
}
|
|
||||||
*(fptr.(**bool)) = b
|
|
||||||
case reflect.Float32:
|
|
||||||
f := new(float32)
|
|
||||||
if dv != nil {
|
|
||||||
*f = dv.(float32)
|
|
||||||
}
|
|
||||||
*(fptr.(**float32)) = f
|
|
||||||
case reflect.Float64:
|
|
||||||
f := new(float64)
|
|
||||||
if dv != nil {
|
|
||||||
*f = dv.(float64)
|
|
||||||
}
|
|
||||||
*(fptr.(**float64)) = f
|
|
||||||
case reflect.Int32:
|
|
||||||
// might be an enum
|
|
||||||
if ft := f.Type(); ft != int32PtrType {
|
|
||||||
// enum
|
|
||||||
f.Set(reflect.New(ft.Elem()))
|
|
||||||
if dv != nil {
|
|
||||||
f.Elem().SetInt(int64(dv.(int32)))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// int32 field
|
|
||||||
i := new(int32)
|
|
||||||
if dv != nil {
|
|
||||||
*i = dv.(int32)
|
|
||||||
}
|
|
||||||
*(fptr.(**int32)) = i
|
|
||||||
}
|
|
||||||
case reflect.Int64:
|
|
||||||
i := new(int64)
|
|
||||||
if dv != nil {
|
|
||||||
*i = dv.(int64)
|
|
||||||
}
|
|
||||||
*(fptr.(**int64)) = i
|
|
||||||
case reflect.String:
|
|
||||||
s := new(string)
|
|
||||||
if dv != nil {
|
|
||||||
*s = dv.(string)
|
|
||||||
}
|
|
||||||
*(fptr.(**string)) = s
|
|
||||||
case reflect.Uint8:
|
|
||||||
// exceptional case: []byte
|
|
||||||
var b []byte
|
|
||||||
if dv != nil {
|
|
||||||
db := dv.([]byte)
|
|
||||||
b = make([]byte, len(db))
|
|
||||||
copy(b, db)
|
|
||||||
} else {
|
|
||||||
b = []byte{}
|
|
||||||
}
|
|
||||||
*(fptr.(*[]byte)) = b
|
|
||||||
case reflect.Uint32:
|
|
||||||
u := new(uint32)
|
|
||||||
if dv != nil {
|
|
||||||
*u = dv.(uint32)
|
|
||||||
}
|
|
||||||
*(fptr.(**uint32)) = u
|
|
||||||
case reflect.Uint64:
|
|
||||||
u := new(uint64)
|
|
||||||
if dv != nil {
|
|
||||||
*u = dv.(uint64)
|
|
||||||
}
|
|
||||||
*(fptr.(**uint64)) = u
|
|
||||||
default:
|
|
||||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, ni := range dm.nested {
|
|
||||||
f := v.Field(ni)
|
|
||||||
// f is *T or []*T or map[T]*T
|
|
||||||
switch f.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
if f.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
setDefaults(f, recur, zeros)
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
for i := 0; i < f.Len(); i++ {
|
|
||||||
e := f.Index(i)
|
|
||||||
if e.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
setDefaults(e, recur, zeros)
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
for _, k := range f.MapKeys() {
|
|
||||||
e := f.MapIndex(k)
|
|
||||||
if e.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
setDefaults(e, recur, zeros)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
|
||||||
// with its scalar fields set to their proto-declared non-zero default values.
|
|
||||||
defaultMu sync.RWMutex
|
|
||||||
defaults = make(map[reflect.Type]defaultMessage)
|
|
||||||
|
|
||||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
|
||||||
)
|
|
||||||
|
|
||||||
// defaultMessage represents information about the default values of a message.
|
|
||||||
type defaultMessage struct {
|
|
||||||
scalars []scalarField
|
|
||||||
nested []int // struct field index of nested messages
|
|
||||||
}
|
|
||||||
|
|
||||||
type scalarField struct {
|
|
||||||
index int // struct field index
|
|
||||||
kind reflect.Kind // element type (the T in *T or []T)
|
|
||||||
value interface{} // the proto-declared default value, or nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// t is a struct type.
|
|
||||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
|
||||||
sprop := GetProperties(t)
|
|
||||||
for _, prop := range sprop.Prop {
|
|
||||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
|
||||||
if !ok {
|
|
||||||
// XXX_unrecognized
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ft := t.Field(fi).Type
|
|
||||||
|
|
||||||
sf, nested, err := fieldDefault(ft, prop)
|
|
||||||
switch {
|
|
||||||
case err != nil:
|
|
||||||
log.Print(err)
|
|
||||||
case nested:
|
|
||||||
dm.nested = append(dm.nested, fi)
|
|
||||||
case sf != nil:
|
|
||||||
sf.index = fi
|
|
||||||
dm.scalars = append(dm.scalars, *sf)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return dm
|
|
||||||
}
|
|
||||||
|
|
||||||
// fieldDefault returns the scalarField for field type ft.
|
|
||||||
// sf will be nil if the field can not have a default.
|
|
||||||
// nestedMessage will be true if this is a nested message.
|
|
||||||
// Note that sf.index is not set on return.
|
|
||||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
|
||||||
var canHaveDefault bool
|
|
||||||
switch ft.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
if ft.Elem().Kind() == reflect.Struct {
|
|
||||||
nestedMessage = true
|
|
||||||
} else {
|
|
||||||
canHaveDefault = true // proto2 scalar field
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
switch ft.Elem().Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
nestedMessage = true // repeated message
|
|
||||||
case reflect.Uint8:
|
|
||||||
canHaveDefault = true // bytes field
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
if ft.Elem().Kind() == reflect.Ptr {
|
|
||||||
nestedMessage = true // map with message values
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !canHaveDefault {
|
|
||||||
if nestedMessage {
|
|
||||||
return nil, true, nil
|
|
||||||
}
|
|
||||||
return nil, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// We now know that ft is a pointer or slice.
|
|
||||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
|
||||||
|
|
||||||
// scalar fields without defaults
|
|
||||||
if !prop.HasDefault {
|
|
||||||
return sf, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// a scalar field: either *T or []byte
|
|
||||||
switch ft.Elem().Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
x, err := strconv.ParseBool(prop.Default)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = x
|
|
||||||
case reflect.Float32:
|
|
||||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = float32(x)
|
|
||||||
case reflect.Float64:
|
|
||||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = x
|
|
||||||
case reflect.Int32:
|
|
||||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = int32(x)
|
|
||||||
case reflect.Int64:
|
|
||||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = x
|
|
||||||
case reflect.String:
|
|
||||||
sf.value = prop.Default
|
|
||||||
case reflect.Uint8:
|
|
||||||
// []byte (not *uint8)
|
|
||||||
sf.value = []byte(prop.Default)
|
|
||||||
case reflect.Uint32:
|
|
||||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = uint32(x)
|
|
||||||
case reflect.Uint64:
|
|
||||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
|
||||||
}
|
|
||||||
sf.value = x
|
|
||||||
default:
|
|
||||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
|
||||||
}
|
|
||||||
|
|
||||||
return sf, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
|
||||||
// Map fields may have key types of non-float scalars, strings and enums.
|
|
||||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
|
||||||
s := mapKeySorter{vs: vs}
|
|
||||||
|
|
||||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
|
||||||
if len(vs) == 0 {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
switch vs[0].Kind() {
|
|
||||||
case reflect.Int32, reflect.Int64:
|
|
||||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
|
||||||
case reflect.Uint32, reflect.Uint64:
|
|
||||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
|
||||||
case reflect.Bool:
|
|
||||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
|
||||||
case reflect.String:
|
|
||||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
type mapKeySorter struct {
|
|
||||||
vs []reflect.Value
|
|
||||||
less func(a, b reflect.Value) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
|
||||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
|
||||||
func (s mapKeySorter) Less(i, j int) bool {
|
|
||||||
return s.less(s.vs[i], s.vs[j])
|
|
||||||
}
|
|
||||||
|
|
||||||
// isProto3Zero reports whether v is a zero proto3 value.
|
|
||||||
func isProto3Zero(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return !v.Bool()
|
|
||||||
case reflect.Int32, reflect.Int64:
|
|
||||||
return v.Int() == 0
|
|
||||||
case reflect.Uint32, reflect.Uint64:
|
|
||||||
return v.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return v.Float() == 0
|
|
||||||
case reflect.String:
|
|
||||||
return v.String() == ""
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
|
||||||
// to assert that that code is compatible with this version of the proto package.
|
|
||||||
ProtoPackageIsVersion3 = true
|
|
||||||
|
|
||||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
|
||||||
// to assert that that code is compatible with this version of the proto package.
|
|
||||||
ProtoPackageIsVersion2 = true
|
|
||||||
|
|
||||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
|
||||||
// to assert that that code is compatible with this version of the proto package.
|
|
||||||
ProtoPackageIsVersion1 = true
|
|
||||||
)
|
|
||||||
|
|
||||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
|
||||||
// This type is not intended to be used by non-generated code.
|
|
||||||
// This type is not subject to any compatibility guarantee.
|
|
||||||
type InternalMessageInfo struct {
|
|
||||||
marshal *marshalInfo
|
|
||||||
unmarshal *unmarshalInfo
|
|
||||||
merge *mergeInfo
|
|
||||||
discard *discardInfo
|
|
||||||
}
|
|
||||||
181
vendor/github.com/golang/protobuf/proto/message_set.go
generated
vendored
@@ -1,181 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Support for message sets.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
|
||||||
// A message type ID is required for storing a protocol buffer in a message set.
|
|
||||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
|
||||||
|
|
||||||
// The first two types (_MessageSet_Item and messageSet)
|
|
||||||
// model what the protocol compiler produces for the following protocol message:
|
|
||||||
// message MessageSet {
|
|
||||||
// repeated group Item = 1 {
|
|
||||||
// required int32 type_id = 2;
|
|
||||||
// required string message = 3;
|
|
||||||
// };
|
|
||||||
// }
|
|
||||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
|
||||||
// because that would introduce a circular dependency between it and this package.
|
|
||||||
|
|
||||||
type _MessageSet_Item struct {
|
|
||||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
|
||||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type messageSet struct {
|
|
||||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
|
||||||
XXX_unrecognized []byte
|
|
||||||
// TODO: caching?
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure messageSet is a Message.
|
|
||||||
var _ Message = (*messageSet)(nil)
|
|
||||||
|
|
||||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
|
||||||
// that may be stored in a MessageSet.
|
|
||||||
type messageTypeIder interface {
|
|
||||||
MessageTypeId() int32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
|
||||||
mti, ok := pb.(messageTypeIder)
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
id := mti.MessageTypeId()
|
|
||||||
for _, item := range ms.Item {
|
|
||||||
if *item.TypeId == id {
|
|
||||||
return item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) Has(pb Message) bool {
|
|
||||||
return ms.find(pb) != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
|
||||||
if item := ms.find(pb); item != nil {
|
|
||||||
return Unmarshal(item.Message, pb)
|
|
||||||
}
|
|
||||||
if _, ok := pb.(messageTypeIder); !ok {
|
|
||||||
return errNoMessageTypeID
|
|
||||||
}
|
|
||||||
return nil // TODO: return error instead?
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) Marshal(pb Message) error {
|
|
||||||
msg, err := Marshal(pb)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if item := ms.find(pb); item != nil {
|
|
||||||
// reuse existing item
|
|
||||||
item.Message = msg
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
mti, ok := pb.(messageTypeIder)
|
|
||||||
if !ok {
|
|
||||||
return errNoMessageTypeID
|
|
||||||
}
|
|
||||||
|
|
||||||
mtid := mti.MessageTypeId()
|
|
||||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
|
||||||
TypeId: &mtid,
|
|
||||||
Message: msg,
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
|
||||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
|
||||||
func (*messageSet) ProtoMessage() {}
|
|
||||||
|
|
||||||
// Support for the message_set_wire_format message option.
|
|
||||||
|
|
||||||
func skipVarint(buf []byte) []byte {
|
|
||||||
i := 0
|
|
||||||
for ; buf[i]&0x80 != 0; i++ {
|
|
||||||
}
|
|
||||||
return buf[i+1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
|
||||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
|
||||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
|
||||||
var m map[int32]Extension
|
|
||||||
switch exts := exts.(type) {
|
|
||||||
case *XXX_InternalExtensions:
|
|
||||||
m = exts.extensionsWrite()
|
|
||||||
case map[int32]Extension:
|
|
||||||
m = exts
|
|
||||||
default:
|
|
||||||
return errors.New("proto: not an extension map")
|
|
||||||
}
|
|
||||||
|
|
||||||
ms := new(messageSet)
|
|
||||||
if err := Unmarshal(buf, ms); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, item := range ms.Item {
|
|
||||||
id := *item.TypeId
|
|
||||||
msg := item.Message
|
|
||||||
|
|
||||||
// Restore wire type and field number varint, plus length varint.
|
|
||||||
// Be careful to preserve duplicate items.
|
|
||||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
|
||||||
if ext, ok := m[id]; ok {
|
|
||||||
// Existing data; rip off the tag and length varint
|
|
||||||
// so we join the new data correctly.
|
|
||||||
// We can assume that ext.enc is set because we are unmarshaling.
|
|
||||||
o := ext.enc[len(b):] // skip wire type and field number
|
|
||||||
_, n := DecodeVarint(o) // calculate length of length varint
|
|
||||||
o = o[n:] // skip length varint
|
|
||||||
msg = append(o, msg...) // join old data and new data
|
|
||||||
}
|
|
||||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
|
||||||
b = append(b, msg...)
|
|
||||||
|
|
||||||
m[id] = Extension{enc: b}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
360
vendor/github.com/golang/protobuf/proto/pointer_reflect.go
generated
vendored
@@ -1,360 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build purego appengine js

// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.

package proto

import (
	"reflect"
	"sync"
)

const unsafeAllowed = false

// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return f.Index
}

// invalidField is an invalid field identifier.
var invalidField = field(nil)

// zeroField is a noop when calling pointer.offset.
var zeroField = field([]int{})

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }

// The pointer type is for the table-driven decoder.
// The implementation here uses a reflect.Value of pointer type to
// create a generic pointer. In pointer_unsafe.go we use unsafe
// instead of reflect to implement the same (but faster) interface.
type pointer struct {
	v reflect.Value
}

// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
	return pointer{v: reflect.ValueOf(*i)}
}

// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
	v := reflect.ValueOf(*i)
	u := reflect.New(v.Type())
	u.Elem().Set(v)
	if deref {
		u = u.Elem()
	}
	return pointer{v: u}
}

// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
	return pointer{v: v}
}

// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}

func (p pointer) isNil() bool {
	return p.v.IsNil()
}

// grow updates the slice s in place to make it one element longer.
// s must be addressable.
// Returns the (addressable) new element.
func grow(s reflect.Value) reflect.Value {
	n, m := s.Len(), s.Cap()
	if n < m {
		s.SetLen(n + 1)
	} else {
		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
	}
	return s.Index(n)
}

func (p pointer) toInt64() *int64 {
	return p.v.Interface().(*int64)
}
func (p pointer) toInt64Ptr() **int64 {
	return p.v.Interface().(**int64)
}
func (p pointer) toInt64Slice() *[]int64 {
	return p.v.Interface().(*[]int64)
}

var int32ptr = reflect.TypeOf((*int32)(nil))

func (p pointer) toInt32() *int32 {
	return p.v.Convert(int32ptr).Interface().(*int32)
}

// The toInt32Ptr/Slice methods don't work because of enums.
// Instead, we must use set/get methods for the int32ptr/slice case.
/*
	func (p pointer) toInt32Ptr() **int32 {
		return p.v.Interface().(**int32)
	}
	func (p pointer) toInt32Slice() *[]int32 {
		return p.v.Interface().(*[]int32)
	}
*/
func (p pointer) getInt32Ptr() *int32 {
	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
		// raw int32 type
		return p.v.Elem().Interface().(*int32)
	}
	// an enum
	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
}
func (p pointer) setInt32Ptr(v int32) {
	// Allocate value in a *int32. Possibly convert that to a *enum.
	// Then assign it to a **int32 or **enum.
	// Note: we can convert *int32 to *enum, but we can't convert
	// **int32 to **enum!
	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
}

// getInt32Slice copies []int32 from p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getInt32Slice() []int32 {
	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
		// raw int32 type
		return p.v.Elem().Interface().([]int32)
	}
	// an enum
	// Allocate a []int32, then assign []enum's values into it.
	// Note: we can't convert []enum to []int32.
	slice := p.v.Elem()
	s := make([]int32, slice.Len())
	for i := 0; i < slice.Len(); i++ {
		s[i] = int32(slice.Index(i).Int())
	}
	return s
}

// setInt32Slice copies []int32 into p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setInt32Slice(v []int32) {
	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
		// raw int32 type
		p.v.Elem().Set(reflect.ValueOf(v))
		return
	}
	// an enum
	// Allocate a []enum, then assign []int32's values into it.
	// Note: we can't convert []enum to []int32.
	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
	for i, x := range v {
		slice.Index(i).SetInt(int64(x))
	}
	p.v.Elem().Set(slice)
}
func (p pointer) appendInt32Slice(v int32) {
	grow(p.v.Elem()).SetInt(int64(v))
}

func (p pointer) toUint64() *uint64 {
	return p.v.Interface().(*uint64)
}
func (p pointer) toUint64Ptr() **uint64 {
	return p.v.Interface().(**uint64)
}
func (p pointer) toUint64Slice() *[]uint64 {
	return p.v.Interface().(*[]uint64)
}
func (p pointer) toUint32() *uint32 {
	return p.v.Interface().(*uint32)
}
func (p pointer) toUint32Ptr() **uint32 {
	return p.v.Interface().(**uint32)
}
func (p pointer) toUint32Slice() *[]uint32 {
	return p.v.Interface().(*[]uint32)
}
func (p pointer) toBool() *bool {
	return p.v.Interface().(*bool)
}
func (p pointer) toBoolPtr() **bool {
	return p.v.Interface().(**bool)
}
func (p pointer) toBoolSlice() *[]bool {
	return p.v.Interface().(*[]bool)
}
func (p pointer) toFloat64() *float64 {
	return p.v.Interface().(*float64)
}
func (p pointer) toFloat64Ptr() **float64 {
	return p.v.Interface().(**float64)
}
func (p pointer) toFloat64Slice() *[]float64 {
	return p.v.Interface().(*[]float64)
}
func (p pointer) toFloat32() *float32 {
	return p.v.Interface().(*float32)
}
func (p pointer) toFloat32Ptr() **float32 {
	return p.v.Interface().(**float32)
}
func (p pointer) toFloat32Slice() *[]float32 {
	return p.v.Interface().(*[]float32)
}
func (p pointer) toString() *string {
	return p.v.Interface().(*string)
}
func (p pointer) toStringPtr() **string {
	return p.v.Interface().(**string)
}
func (p pointer) toStringSlice() *[]string {
	return p.v.Interface().(*[]string)
}
func (p pointer) toBytes() *[]byte {
	return p.v.Interface().(*[]byte)
}
func (p pointer) toBytesSlice() *[][]byte {
	return p.v.Interface().(*[][]byte)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
	return p.v.Interface().(*XXX_InternalExtensions)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
	return p.v.Interface().(*map[int32]Extension)
}
func (p pointer) getPointer() pointer {
	return pointer{v: p.v.Elem()}
}
func (p pointer) setPointer(q pointer) {
	p.v.Elem().Set(q.v)
}
func (p pointer) appendPointer(q pointer) {
	grow(p.v.Elem()).Set(q.v)
}

// getPointerSlice copies []*T from p as a new []pointer.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getPointerSlice() []pointer {
	if p.v.IsNil() {
		return nil
	}
	n := p.v.Elem().Len()
	s := make([]pointer, n)
	for i := 0; i < n; i++ {
		s[i] = pointer{v: p.v.Elem().Index(i)}
	}
	return s
}

// setPointerSlice copies []pointer into p as a new []*T.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setPointerSlice(v []pointer) {
	if v == nil {
		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
		return
	}
	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
	for _, p := range v {
		s = reflect.Append(s, p.v)
	}
	p.v.Elem().Set(s)
}

// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
	if p.v.Elem().IsNil() {
		return pointer{v: p.v.Elem()}
	}
	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
}

func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
	// TODO: check that p.v.Type().Elem() == t?
	return p.v
}

func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
	atomicLock.Lock()
	defer atomicLock.Unlock()
	return *p
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
	atomicLock.Lock()
	defer atomicLock.Unlock()
	*p = v
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
	atomicLock.Lock()
	defer atomicLock.Unlock()
	return *p
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
	atomicLock.Lock()
	defer atomicLock.Unlock()
	*p = v
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
	atomicLock.Lock()
	defer atomicLock.Unlock()
	return *p
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
	atomicLock.Lock()
	defer atomicLock.Unlock()
	*p = v
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
	atomicLock.Lock()
	defer atomicLock.Unlock()
	return *p
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
	atomicLock.Lock()
	defer atomicLock.Unlock()
	*p = v
}

var atomicLock sync.Mutex
313
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
generated
vendored
@@ -1,313 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build !purego,!appengine,!js

// This file contains the implementation of the proto field accesses using package unsafe.

package proto

import (
	"reflect"
	"sync/atomic"
	"unsafe"
)

const unsafeAllowed = true

// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return field(f.Offset)
}

// invalidField is an invalid field identifier.
const invalidField = ^field(0)

// zeroField is a noop when calling pointer.offset.
const zeroField = field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
	return f != invalidField
}

// The pointer type below is for the new table-driven encoder/decoder.
// The implementation here uses unsafe.Pointer to create a generic pointer.
// In pointer_reflect.go we use reflect instead of unsafe to implement
// the same (but slower) interface.
type pointer struct {
	p unsafe.Pointer
}

// size of pointer
var ptrSize = unsafe.Sizeof(uintptr(0))

// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
	// Super-tricky - read pointer out of data word of interface value.
	// Saves ~25ns over the equivalent:
	// return valToPointer(reflect.ValueOf(*i))
	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}

// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
	// Super-tricky - read or get the address of data word of interface value.
	if isptr {
		// The interface is of pointer type, thus it is a direct interface.
		// The data word is the pointer data itself. We take its address.
		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
	} else {
		// The interface is not of pointer type. The data word is the pointer
		// to the data.
		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
	}
	if deref {
		p.p = *(*unsafe.Pointer)(p.p)
	}
	return p
}

// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
	return pointer{p: unsafe.Pointer(v.Pointer())}
}

// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
	// For safety, we should panic if !f.IsValid, however calling panic causes
	// this to no longer be inlineable, which is a serious performance cost.
	/*
		if !f.IsValid() {
			panic("invalid field")
		}
	*/
	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}

func (p pointer) isNil() bool {
	return p.p == nil
}

func (p pointer) toInt64() *int64 {
	return (*int64)(p.p)
}
func (p pointer) toInt64Ptr() **int64 {
	return (**int64)(p.p)
}
func (p pointer) toInt64Slice() *[]int64 {
	return (*[]int64)(p.p)
}
func (p pointer) toInt32() *int32 {
	return (*int32)(p.p)
}

// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
/*
	func (p pointer) toInt32Ptr() **int32 {
		return (**int32)(p.p)
	}
	func (p pointer) toInt32Slice() *[]int32 {
		return (*[]int32)(p.p)
	}
*/
func (p pointer) getInt32Ptr() *int32 {
	return *(**int32)(p.p)
}
func (p pointer) setInt32Ptr(v int32) {
	*(**int32)(p.p) = &v
}

// getInt32Slice loads a []int32 from p.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getInt32Slice() []int32 {
	return *(*[]int32)(p.p)
}

// setInt32Slice stores a []int32 to p.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setInt32Slice(v []int32) {
	*(*[]int32)(p.p) = v
}

// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
func (p pointer) appendInt32Slice(v int32) {
	s := (*[]int32)(p.p)
	*s = append(*s, v)
}

func (p pointer) toUint64() *uint64 {
	return (*uint64)(p.p)
}
func (p pointer) toUint64Ptr() **uint64 {
	return (**uint64)(p.p)
}
func (p pointer) toUint64Slice() *[]uint64 {
	return (*[]uint64)(p.p)
}
func (p pointer) toUint32() *uint32 {
	return (*uint32)(p.p)
}
func (p pointer) toUint32Ptr() **uint32 {
	return (**uint32)(p.p)
}
func (p pointer) toUint32Slice() *[]uint32 {
	return (*[]uint32)(p.p)
}
func (p pointer) toBool() *bool {
	return (*bool)(p.p)
}
func (p pointer) toBoolPtr() **bool {
	return (**bool)(p.p)
}
func (p pointer) toBoolSlice() *[]bool {
	return (*[]bool)(p.p)
}
func (p pointer) toFloat64() *float64 {
	return (*float64)(p.p)
}
func (p pointer) toFloat64Ptr() **float64 {
	return (**float64)(p.p)
}
func (p pointer) toFloat64Slice() *[]float64 {
	return (*[]float64)(p.p)
}
func (p pointer) toFloat32() *float32 {
	return (*float32)(p.p)
}
func (p pointer) toFloat32Ptr() **float32 {
	return (**float32)(p.p)
}
func (p pointer) toFloat32Slice() *[]float32 {
	return (*[]float32)(p.p)
}
func (p pointer) toString() *string {
	return (*string)(p.p)
}
func (p pointer) toStringPtr() **string {
	return (**string)(p.p)
}
func (p pointer) toStringSlice() *[]string {
	return (*[]string)(p.p)
}
func (p pointer) toBytes() *[]byte {
	return (*[]byte)(p.p)
}
func (p pointer) toBytesSlice() *[][]byte {
	return (*[][]byte)(p.p)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
	return (*XXX_InternalExtensions)(p.p)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
	return (*map[int32]Extension)(p.p)
}

// getPointerSlice loads []*T from p as a []pointer.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getPointerSlice() []pointer {
	// Super-tricky - p should point to a []*T where T is a
	// message type. We load it as []pointer.
	return *(*[]pointer)(p.p)
}

// setPointerSlice stores []pointer into p as a []*T.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setPointerSlice(v []pointer) {
	// Super-tricky - p should point to a []*T where T is a
	// message type. We store it as []pointer.
	*(*[]pointer)(p.p) = v
}

// getPointer loads the pointer at p and returns it.
func (p pointer) getPointer() pointer {
	return pointer{p: *(*unsafe.Pointer)(p.p)}
}

// setPointer stores the pointer q at p.
func (p pointer) setPointer(q pointer) {
	*(*unsafe.Pointer)(p.p) = q.p
}

// append q to the slice pointed to by p.
func (p pointer) appendPointer(q pointer) {
	s := (*[]unsafe.Pointer)(p.p)
	*s = append(*s, q.p)
}

// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
	// Super-tricky - read pointer out of data word of interface value.
	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
}

// asPointerTo returns a reflect.Value that is a pointer to an
// object of type t stored at p.
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
	return reflect.NewAt(t, p.p)
}

func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
640
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
@@ -1,162 +1,104 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
 // Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
+// Use of this source code is governed by a BSD-style
-//
+// license that can be found in the LICENSE file.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 package proto
 
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
 import (
 	"fmt"
-	"log"
 	"reflect"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoimpl"
 )
 
-const debug bool = false
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
-
-// Constants that identify the encoding of a value on the wire.
+//
-const (
+// Deprecated: Do not use.
-	WireVarint = 0
-	WireFixed64 = 1
-	WireBytes = 2
-	WireStartGroup = 3
-	WireEndGroup = 4
-	WireFixed32 = 5
-)
-
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
-	fastTags []int
-	slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
-	if t > 0 && t < tagMapFastLimit {
-		if t >= len(p.fastTags) {
-			return 0, false
-		}
-		fi := p.fastTags[t]
-		return fi, fi >= 0
-	}
-	fi, ok := p.slowTags[t]
-	return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
-	if t > 0 && t < tagMapFastLimit {
-		for len(p.fastTags) < t+1 {
-			p.fastTags = append(p.fastTags, -1)
-		}
-		p.fastTags[t] = fi
-		return
-	}
-	if p.slowTags == nil {
-		p.slowTags = make(map[int]int)
-	}
-	p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
 type StructProperties struct {
-	Prop []*Properties // properties for each field
+	// Prop are the properties for each field.
-	reqCount int // required count
+	//
-	decoderTags tagMap // map from proto tag to struct field number
+	// Fields belonging to a oneof are stored in OneofTypes instead, with a
-	decoderOrigNames map[string]int // map from original name to struct field number
+	// single Properties representing the parent oneof held here.
-	order []int // list of struct field numbers in tag order
+	//
+	// The order of Prop matches the order of fields in the Go struct.
+	// Struct fields that are not related to protobufs have a "XXX_" prefix
+	// in the Properties.Name and must be ignored by the user.
+	Prop []*Properties
 
 	// OneofTypes contains information about the oneof fields in this message.
-	// It is keyed by the original name of a field.
+	// It is keyed by the protobuf field name.
 	OneofTypes map[string]*OneofProperties
 }
 
-// OneofProperties represents information about a specific field in a oneof.
+// Properties represents the type information for a protobuf message field.
-type OneofProperties struct {
+//
-	Type reflect.Type // pointer to generated struct type for this oneof field
+// Deprecated: Do not use.
-	Field int // struct field number of the containing oneof in the message
-	Prop *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
-	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
 type Properties struct {
-	Name string // name of the field, for error messages
+	// Name is a placeholder name with little meaningful semantic value.
-	OrigName string // original name before protocol compiler (always set)
+	// If the name has an "XXX_" prefix, the entire Properties must be ignored.
-	JSONName string // name to use for JSON; determined by protoc
+	Name string
+	// OrigName is the protobuf field name or oneof name.
+	OrigName string
+	// JSONName is the JSON name for the protobuf field.
+	JSONName string
+	// Enum is a placeholder name for enums.
+	// For historical reasons, this is neither the Go name for the enum,
+	// nor the protobuf name for the enum.
+	Enum string // Deprecated: Do not use.
+	// Weak contains the full name of the weakly referenced message.
+	Weak string
+	// Wire is a string representation of the wire type.
 	Wire string
+	// WireType is the protobuf wire type for the field.
 	WireType int
+	// Tag is the protobuf field number.
 	Tag int
+	// Required reports whether this is a required field.
 	Required bool
+	// Optional reports whether this is a optional field.
 	Optional bool
+	// Repeated reports whether this is a repeated field.
 	Repeated bool
-	Packed bool // relevant for repeated primitives only
+	// Packed reports whether this is a packed repeated field of scalars.
-	Enum string // set for enum types only
+	Packed bool
-	proto3 bool // whether this is known to be a proto3 field
+	// Proto3 reports whether this field operates under the proto3 syntax.
-	oneof bool // whether this is a oneof field
+	Proto3 bool
+	// Oneof reports whether this field belongs within a oneof.
+	Oneof bool
 
-	Default string // default value
+	// Default is the default value in string form.
-	HasDefault bool // whether an explicit default was provided
+	Default string
+	// HasDefault reports whether the field has a default value.
+	HasDefault bool
 
-	stype reflect.Type // set for struct types only
+	// MapKeyProp is the properties for the key field for a map field.
-	sprop *StructProperties // set for struct types only
+	MapKeyProp *Properties
+	// MapValProp is the properties for the value field for a map field.
+	MapValProp *Properties
+}
 
-	mtype reflect.Type // set for map types only
+// OneofProperties represents the type information for a protobuf oneof.
-	MapKeyProp *Properties // set for map types only
+//
-	MapValProp *Properties // set for map types only
+// Deprecated: Do not use.
+type OneofProperties struct {
+	// Type is a pointer to the generated wrapper type for the field value.
+	// This is nil for messages that are not in the open-struct API.
+	Type reflect.Type
+	// Field is the index into StructProperties.Prop for the containing oneof.
+	Field int
+	// Prop is the properties for the field.
+	Prop *Properties
 }
 
 // String formats the properties in the protobuf struct field tag style.
 func (p *Properties) String() string {
 	s := p.Wire
-	s += ","
+	s += "," + strconv.Itoa(p.Tag)
-	s += strconv.Itoa(p.Tag)
 	if p.Required {
 		s += ",req"
 	}
@@ -170,18 +112,21 @@ func (p *Properties) String() string {
 		s += ",packed"
 	}
 	s += ",name=" + p.OrigName
-	if p.JSONName != p.OrigName {
+	if p.JSONName != "" {
 		s += ",json=" + p.JSONName
 	}
-	if p.proto3 {
-		s += ",proto3"
-	}
-	if p.oneof {
-		s += ",oneof"
-	}
 	if len(p.Enum) > 0 {
 		s += ",enum=" + p.Enum
 	}
+	if len(p.Weak) > 0 {
+		s += ",weak=" + p.Weak
+	}
+	if p.Proto3 {
+		s += ",proto3"
+	}
+	if p.Oneof {
+		s += ",oneof"
+	}
 	if p.HasDefault {
 		s += ",def=" + p.Default
 	}
@@ -189,356 +134,173 @@ func (p *Properties) String() string {
 }
 
 // Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
+func (p *Properties) Parse(tag string) {
-	// "bytes,49,opt,name=foo,def=hello!"
+	// For example: "bytes,49,opt,name=foo,def=hello!"
-	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	for len(tag) > 0 {
-	if len(fields) < 2 {
+		i := strings.IndexByte(tag, ',')
-		log.Printf("proto: tag has too few fields: %q", s)
+		if i < 0 {
-		return
+			i = len(tag)
 		}
-
+		switch s := tag[:i]; {
-	p.Wire = fields[0]
+		case strings.HasPrefix(s, "name="):
-	switch p.Wire {
+			p.OrigName = s[len("name="):]
-	case "varint":
+		case strings.HasPrefix(s, "json="):
-		p.WireType = WireVarint
+			p.JSONName = s[len("json="):]
-	case "fixed32":
+		case strings.HasPrefix(s, "enum="):
-		p.WireType = WireFixed32
+			p.Enum = s[len("enum="):]
-	case "fixed64":
+		case strings.HasPrefix(s, "weak="):
-		p.WireType = WireFixed64
+			p.Weak = s[len("weak="):]
-	case "zigzag32":
+		case strings.Trim(s, "0123456789") == "":
-		p.WireType = WireVarint
+			n, _ := strconv.ParseUint(s, 10, 32)
-	case "zigzag64":
+			p.Tag = int(n)
-		p.WireType = WireVarint
+		case s == "opt":
-	case "bytes", "group":
-		p.WireType = WireBytes
-		// no numeric converter for non-numeric types
-	default:
-		log.Printf("proto: tag has unknown wire type: %q", s)
-		return
-	}
-
-	var err error
-	p.Tag, err = strconv.Atoi(fields[1])
-	if err != nil {
-		return
-	}
-
-outer:
-	for i := 2; i < len(fields); i++ {
-		f := fields[i]
-		switch {
-		case f == "req":
-			p.Required = true
-		case f == "opt":
 			p.Optional = true
-		case f == "rep":
+		case s == "req":
+			p.Required = true
+		case s == "rep":
 			p.Repeated = true
-		case f == "packed":
+		case s == "varint" || s == "zigzag32" || s == "zigzag64":
+			p.Wire = s
+			p.WireType = WireVarint
+		case s == "fixed32":
+			p.Wire = s
+			p.WireType = WireFixed32
+		case s == "fixed64":
+			p.Wire = s
+			p.WireType = WireFixed64
+		case s == "bytes":
+			p.Wire = s
+			p.WireType = WireBytes
+		case s == "group":
+			p.Wire = s
+			p.WireType = WireStartGroup
+		case s == "packed":
 			p.Packed = true
-		case strings.HasPrefix(f, "name="):
+		case s == "proto3":
-			p.OrigName = f[5:]
+			p.Proto3 = true
-		case strings.HasPrefix(f, "json="):
+		case s == "oneof":
-			p.JSONName = f[5:]
+			p.Oneof = true
-		case strings.HasPrefix(f, "enum="):
+		case strings.HasPrefix(s, "def="):
-			p.Enum = f[5:]
+			// The default tag is special in that everything afterwards is the
-		case f == "proto3":
+			// default regardless of the presence of commas.
-			p.proto3 = true
-		case f == "oneof":
-			p.oneof = true
-		case strings.HasPrefix(f, "def="):
 			p.HasDefault = true
-			p.Default = f[4:] // rest of string
+			p.Default, i = tag[len("def="):], len(tag)
-			if i+1 < len(fields) {
-				// Commas aren't escaped, and def is always last.
-				p.Default += "," + strings.Join(fields[i+1:], ",")
-				break outer
-			}
 		}
+		tag = strings.TrimPrefix(tag[i:], ",")
 	}
 }
 
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// setFieldProps initializes the field properties for submessages and maps.
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
-	switch t1 := typ; t1.Kind() {
-	case reflect.Ptr:
-		if t1.Elem().Kind() == reflect.Struct {
-			p.stype = t1.Elem()
-		}
-
-	case reflect.Slice:
-		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
-			p.stype = t2.Elem()
-		}
-
-	case reflect.Map:
-		p.mtype = t1
-		p.MapKeyProp = &Properties{}
-		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
-		p.MapValProp = &Properties{}
-		vtype := p.mtype.Elem()
-		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
-			// The value type is not a message (*T) or bytes ([]byte),
-			// so we need encoders for the pointer to this type.
-			vtype = reflect.PtrTo(vtype)
-		}
-		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
-	}
-
-	if p.stype != nil {
-		if lockGetProp {
-			p.sprop = GetProperties(p.stype)
-		} else {
-			p.sprop = getPropertiesLocked(p.stype)
-		}
-	}
-}
-
-var (
-	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-)
-
 // Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
 func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
-	p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
-	// "bytes,49,opt,def=hello!"
 	p.Name = name
 	p.OrigName = name
 	if tag == "" {
 		return
 	}
 	p.Parse(tag)
-	p.setFieldProps(typ, f, lockGetProp)
+	if typ != nil && typ.Kind() == reflect.Map {
+		p.MapKeyProp = new(Properties)
+		p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+		p.MapValProp = new(Properties)
+		p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+	}
 }
 
-var (
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
-	propertiesMu sync.RWMutex
-	propertiesMap = make(map[reflect.Type]*StructProperties)
-)
-
-// GetProperties returns the list of properties for the type represented by t.
+// GetProperties returns the list of properties for the type represented by t,
-// t must represent a generated struct type of a protocol message.
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
 func GetProperties(t reflect.Type) *StructProperties {
-	if t.Kind() != reflect.Struct {
+	if p, ok := propertiesCache.Load(t); ok {
-		panic("proto: type must have kind struct")
+		return p.(*StructProperties)
 	}
+	p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
-	// Most calls to GetProperties in a long-running program will be
+	return p.(*StructProperties)
-	// retrieving details for types we have seen before.
-	propertiesMu.RLock()
-	sprop, ok := propertiesMap[t]
-	propertiesMu.RUnlock()
-	if ok {
-		return sprop
-	}
-
-	propertiesMu.Lock()
-	sprop = getPropertiesLocked(t)
-	propertiesMu.Unlock()
-	return sprop
 }
 
-type (
+func newProperties(t reflect.Type) *StructProperties {
-	oneofFuncsIface interface {
+	if t.Kind() != reflect.Struct {
-		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+		panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
-	}
-	oneofWrappersIface interface {
-		XXX_OneofWrappers() []interface{}
-	}
-)
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
-	if prop, ok := propertiesMap[t]; ok {
-		return prop
 	}
 
+	var hasOneof bool
 	prop := new(StructProperties)
-	// in case of recursive protos, fill this in now.
-	propertiesMap[t] = prop
-
-	// build properties
-	prop.Prop = make([]*Properties, t.NumField())
-	prop.order = make([]int, t.NumField())
-
+	// Construct a list of properties for each field in the struct.
 	for i := 0; i < t.NumField(); i++ {
-		f := t.Field(i)
 		p := new(Properties)
-		name := f.Name
+		f := t.Field(i)
-		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+		tagField := f.Tag.Get("protobuf")
+		p.Init(f.Type, f.Name, tagField, &f)
 
-		oneof := f.Tag.Get("protobuf_oneof") // special case
+		tagOneof := f.Tag.Get("protobuf_oneof")
-		if oneof != "" {
+		if tagOneof != "" {
-			// Oneof fields don't use the traditional protobuf tag.
+			hasOneof = true
-			p.OrigName = oneof
+			p.OrigName = tagOneof
 		}
-		prop.Prop[i] = p
-		prop.order[i] = i
+		// Rename unrelated struct fields with the "XXX_" prefix since so much
-		if debug {
+		// user code simply checks for this to exclude special fields.
-			print(i, " ", f.Name, " ", t.String(), " ")
+		if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
-			if p.Tag > 0 {
+			p.Name = "XXX_" + p.Name
-				print(p.String())
+			p.OrigName = "XXX_" + p.OrigName
+		} else if p.Weak != "" {
+			p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
		}
-			print("\n")
+		prop.Prop = append(prop.Prop, p)
+	}

+	// Construct a mapping of oneof field names to properties.
+	if hasOneof {
+		var oneofWrappers []interface{}
+		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+		}
+		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+		}
+		if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+			if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+				oneofWrappers = m.ProtoMessageInfo().OneofWrappers
			}
		}
 
-	// Re-order prop.order.
-	sort.Sort(prop)
-
-	var oots []interface{}
-	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
-	case oneofFuncsIface:
-		_, _, _, oots = m.XXX_OneofFuncs()
-	case oneofWrappersIface:
-		oots = m.XXX_OneofWrappers()
-	}
-	if len(oots) > 0 {
-		// Interpret oneof metadata.
 		prop.OneofTypes = make(map[string]*OneofProperties)
-		for _, oot := range oots {
+		for _, wrapper := range oneofWrappers {
-			oop := &OneofProperties{
+			p := &OneofProperties{
-				Type: reflect.ValueOf(oot).Type(), // *T
+				Type: reflect.ValueOf(wrapper).Type(), // *T
 				Prop: new(Properties),
 			}
-			sft := oop.Type.Elem().Field(0)
+			f := p.Type.Elem().Field(0)
-			oop.Prop.Name = sft.Name
+			p.Prop.Name = f.Name
-			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			p.Prop.Parse(f.Tag.Get("protobuf"))
-			// There will be exactly one interface field that
-			// this new value is assignable to.
-			for i := 0; i < t.NumField(); i++ {
-				f := t.Field(i)
-				if f.Type.Kind() != reflect.Interface {
-					continue
-				}
-				if !oop.Type.AssignableTo(f.Type) {
-					continue
-				}
-				oop.Field = i
-				break
-			}
-			prop.OneofTypes[oop.Prop.OrigName] = oop
-		}
-	}
-
-	// build required counts
+			// Determine the struct field that contains this oneof.
-	// build tags
+			// Each wrapper is assignable to exactly one parent field.
-	reqCount := 0
+			var foundOneof bool
-	prop.decoderOrigNames = make(map[string]int)
+			for i := 0; i < t.NumField() && !foundOneof; i++ {
||||||
for i, p := range prop.Prop {
|
if p.Type.AssignableTo(t.Field(i).Type) {
|
||||||
if strings.HasPrefix(p.Name, "XXX_") {
|
p.Field = i
|
||||||
// Internal fields should not appear in tags/origNames maps.
|
foundOneof = true
|
||||||
// They are handled specially when encoding and decoding.
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
if p.Required {
|
|
||||||
reqCount++
|
|
||||||
}
|
}
|
||||||
prop.decoderTags.put(p.Tag, i)
|
if !foundOneof {
|
||||||
prop.decoderOrigNames[p.OrigName] = i
|
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||||
|
}
|
||||||
|
prop.OneofTypes[p.Prop.OrigName] = p
|
||||||
|
}
|
||||||
}
|
}
|
||||||
prop.reqCount = reqCount
|
|
||||||
|
|
||||||
return prop
|
return prop
|
||||||
}
|
}
|
||||||
|
|
||||||
// A global registry of enum types.
|
func (sp *StructProperties) Len() int { return len(sp.Prop) }
|
||||||
// The generated code will register the generated maps by calling RegisterEnum.
|
func (sp *StructProperties) Less(i, j int) bool { return false }
|
||||||
|
func (sp *StructProperties) Swap(i, j int) { return }
|
||||||
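A minimal usage sketch of the deprecated GetProperties API shown above (not part of the vendored diff). The generated package and message type pb.Example are hypothetical stand-ins for any protoc-gen-go output; GetProperties expects the struct type, not the pointer type.

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
	pb "example.com/project/gen/examplepb" // hypothetical generated package
)

func main() {
	t := reflect.TypeOf(pb.Example{})   // struct type of a generated message
	props := proto.GetProperties(t)     // cached *StructProperties for t
	for _, p := range props.Prop {
		fmt.Println(p.Name, p.OrigName) // per-field property info
	}
}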
// The legacy global registry helpers below were removed from properties.go in
// this update; their replacements now live in the new registry.go added later
// in this commit.

var enumValueMaps = make(map[string]map[string]int32)

// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
	if _, ok := enumValueMaps[typeName]; ok {
		panic("proto: duplicate enum registered: " + typeName)
	}
	enumValueMaps[typeName] = valueMap
}

// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or a nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
	return enumValueMaps[enumType]
}

// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
	revProtoTypes  = make(map[reflect.Type]string)
)

// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
	if _, ok := protoTypedNils[name]; ok {
		// TODO: Some day, make this a panic.
		log.Printf("proto: duplicate proto type registered: %s", name)
		return
	}
	t := reflect.TypeOf(x)
	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
		// Generated code always calls RegisterType with nil x.
		// This check is just for extra safety.
		protoTypedNils[name] = x
	} else {
		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
	}
	revProtoTypes[t] = name
}

// RegisterMapType is called from generated code and maps from the fully qualified
// proto name to the native map type of the proto map definition.
func RegisterMapType(x interface{}, name string) {
	if reflect.TypeOf(x).Kind() != reflect.Map {
		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
	}
	if _, ok := protoMapTypes[name]; ok {
		log.Printf("proto: duplicate proto type registered: %s", name)
		return
	}
	t := reflect.TypeOf(x)
	protoMapTypes[name] = t
	revProtoTypes[t] = name
}

// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string {
	type xname interface {
		XXX_MessageName() string
	}
	if m, ok := x.(xname); ok {
		return m.XXX_MessageName()
	}
	return revProtoTypes[reflect.TypeOf(x)]
}

// MessageType returns the message type (pointer to struct) for a named message.
// The type is not guaranteed to implement proto.Message if the name refers to a
// map entry.
func MessageType(name string) reflect.Type {
	if t, ok := protoTypedNils[name]; ok {
		return reflect.TypeOf(t)
	}
	return protoMapTypes[name]
}

// A registry of all linked proto files.
var (
	protoFiles = make(map[string][]byte) // file name => fileDescriptor
)

// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
	protoFiles[filename] = fileDescriptor
}

// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
167
vendor/github.com/golang/protobuf/proto/proto.go
generated
vendored
Normal file
167
vendor/github.com/golang/protobuf/proto/proto.go
generated
vendored
Normal file
@@ -0,0 +1,167 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package proto provides functionality for handling protocol buffer messages.
// In particular, it provides marshaling and unmarshaling between a protobuf
// message and the binary wire format.
//
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
// more information.
//
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
package proto

import (
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/runtime/protoiface"
	"google.golang.org/protobuf/runtime/protoimpl"
)

const (
	ProtoPackageIsVersion1 = true
	ProtoPackageIsVersion2 = true
	ProtoPackageIsVersion3 = true
	ProtoPackageIsVersion4 = true
)

// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}

// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}

// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1

// MessageV1 converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil.
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
	return protoimpl.X.ProtoMessageV1Of(m)
}

// MessageV2 converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil.
func MessageV2(m GeneratedMessage) protoV2.Message {
	return protoimpl.X.ProtoMessageV2Of(m)
}

// MessageReflect returns a reflective view for a message.
// It returns nil if m is nil.
func MessageReflect(m Message) protoreflect.Message {
	return protoimpl.X.MessageOf(m)
}

// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
	// Marshal formats the encoded bytes of the message.
	// It should be deterministic and emit valid protobuf wire data.
	// The caller takes ownership of the returned buffer.
	Marshal() ([]byte, error)
}

// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
	// Unmarshal parses the encoded bytes of the protobuf wire input.
	// The provided buffer is only valid for during method call.
	// It should not reset the receiver message.
	Unmarshal([]byte) error
}

// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
	// Merge merges the contents of src into the receiver message.
	// It clones all data structures in src such that it aliases no mutable
	// memory referenced by src.
	Merge(src Message)
}

// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
	err error
}

func (e *RequiredNotSetError) Error() string {
	if e.err != nil {
		return e.err.Error()
	}
	return "proto: required field not set"
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
	return true
}

func checkRequiredNotSet(m protoV2.Message) error {
	if err := protoV2.CheckInitialized(m); err != nil {
		return &RequiredNotSetError{err: err}
	}
	return nil
}

// Clone returns a deep copy of src.
func Clone(src Message) Message {
	return MessageV1(protoV2.Clone(MessageV2(src)))
}

// Merge merges src into dst, which must be messages of the same type.
//
// Populated scalar fields in src are copied to dst, while populated
// singular messages in src are merged into dst by recursively calling Merge.
// The elements of every list field in src is appended to the corresponded
// list fields in dst. The entries of every map field in src is copied into
// the corresponding map field in dst, possibly replacing existing entries.
// The unknown fields of src are appended to the unknown fields of dst.
func Merge(dst, src Message) {
	protoV2.Merge(MessageV2(dst), MessageV2(src))
}

// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
//
// Two messages are equal if they are the same protobuf message type,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
func Equal(x, y Message) bool {
	return protoV2.Equal(MessageV2(x), MessageV2(y))
}

func isMessageSet(md protoreflect.MessageDescriptor) bool {
	ms, ok := md.(interface{ IsMessageSet() bool })
	return ok && ms.IsMessageSet()
}
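A hedged sketch of the v1/v2 bridge functions documented above (not part of the vendored diff): the pb.Example message type and its package path are hypothetical stand-ins for any generated message.

package main

import (
	"fmt"

	protoV1 "github.com/golang/protobuf/proto"
	protoV2 "google.golang.org/protobuf/proto"
	pb "example.com/project/gen/examplepb" // hypothetical generated package
)

func main() {
	m := &pb.Example{}

	// MessageV2 adapts a v1 message so the v2 API can operate on it.
	b, err := protoV2.Marshal(protoV1.MessageV2(m))
	if err != nil {
		panic(err)
	}

	// MessageV1 converts back for code that still expects the v1 interface.
	clone := protoV1.MessageV1(protoV2.Clone(protoV1.MessageV2(m)))
	fmt.Println(len(b), clone != nil)
}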
323
vendor/github.com/golang/protobuf/proto/registry.go
generated
vendored
Normal file
323
vendor/github.com/golang/protobuf/proto/registry.go
generated
vendored
Normal file
@@ -0,0 +1,323 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"reflect"
	"strings"
	"sync"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/runtime/protoimpl"
)

// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"

// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte

var fileCache sync.Map // map[filePath]fileDescGZIP

// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
func RegisterFile(s filePath, d fileDescGZIP) {
	// Decompress the descriptor.
	zr, err := gzip.NewReader(bytes.NewReader(d))
	if err != nil {
		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
	}
	b, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
	}

	// Construct a protoreflect.FileDescriptor from the raw descriptor.
	// Note that DescBuilder.Build automatically registers the constructed
	// file descriptor with the v2 registry.
	protoimpl.DescBuilder{RawDescriptor: b}.Build()

	// Locally cache the raw descriptor form for the file.
	fileCache.Store(s, d)
}

// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
	if v, ok := fileCache.Load(s); ok {
		return v.(fileDescGZIP)
	}

	// Find the descriptor in the v2 registry.
	var b []byte
	if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
		if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
			b = fd.ProtoLegacyRawDesc()
		} else {
			// TODO: Use protodesc.ToFileDescriptorProto to construct
			// a descriptorpb.FileDescriptorProto and marshal it.
			// However, doing so causes the proto package to have a dependency
			// on descriptorpb, leading to cyclic dependency issues.
		}
	}

	// Locally cache the raw descriptor form for the file.
	if len(b) > 0 {
		v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
		return v.(fileDescGZIP)
	}
	return nil
}

// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"

// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32

// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string

var enumCache sync.Map     // map[enumName]enumsByName
var numFilesCache sync.Map // map[protoreflect.FullName]int

// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
	if _, ok := enumCache.Load(s); ok {
		panic("proto: duplicate enum registered: " + s)
	}
	enumCache.Store(s, m)

	// This does not forward registration to the v2 registry since this API
	// lacks sufficient information to construct a complete v2 enum descriptor.
}

// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
	if v, ok := enumCache.Load(s); ok {
		return v.(enumsByName)
	}

	// Check whether the cache is stale. If the number of files in the current
	// package differs, then it means that some enums may have been recently
	// registered upstream that we do not know about.
	var protoPkg protoreflect.FullName
	if i := strings.LastIndexByte(s, '.'); i >= 0 {
		protoPkg = protoreflect.FullName(s[:i])
	}
	v, _ := numFilesCache.Load(protoPkg)
	numFiles, _ := v.(int)
	if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
		return nil // cache is up-to-date; was not found earlier
	}

	// Update the enum cache for all enums declared in the given proto package.
	numFiles = 0
	protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
		walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
			name := protoimpl.X.LegacyEnumName(ed)
			if _, ok := enumCache.Load(name); !ok {
				m := make(enumsByName)
				evs := ed.Values()
				for i := evs.Len() - 1; i >= 0; i-- {
					ev := evs.Get(i)
					m[string(ev.Name())] = int32(ev.Number())
				}
				enumCache.LoadOrStore(name, m)
			}
		})
		numFiles++
		return true
	})
	numFilesCache.Store(protoPkg, numFiles)

	// Check cache again for enum map.
	if v, ok := enumCache.Load(s); ok {
		return v.(enumsByName)
	}
	return nil
}

// walkEnums recursively walks all enums declared in d.
func walkEnums(d interface {
	Enums() protoreflect.EnumDescriptors
	Messages() protoreflect.MessageDescriptors
}, f func(protoreflect.EnumDescriptor)) {
	eds := d.Enums()
	for i := eds.Len() - 1; i >= 0; i-- {
		f(eds.Get(i))
	}
	mds := d.Messages()
	for i := mds.Len() - 1; i >= 0; i-- {
		walkEnums(mds.Get(i), f)
	}
}

// messageName is the full name of protobuf message.
type messageName = string

var messageTypeCache sync.Map // map[messageName]reflect.Type

// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
func RegisterType(m Message, s messageName) {
	mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
	if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
		panic(err)
	}
	messageTypeCache.Store(s, reflect.TypeOf(m))
}

// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
	t := reflect.TypeOf(m)
	if t.Kind() != reflect.Map {
		panic(fmt.Sprintf("invalid map kind: %v", t))
	}
	if _, ok := messageTypeCache.Load(s); ok {
		panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
	}
	messageTypeCache.Store(s, t)
}

// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
	if v, ok := messageTypeCache.Load(s); ok {
		return v.(reflect.Type)
	}

	// Derive the message type from the v2 registry.
	var t reflect.Type
	if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
		t = messageGoType(mt)
	}

	// If we could not get a concrete type, it is possible that it is a
	// pseudo-message for a map entry.
	if t == nil {
		d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
		if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
			kt := goTypeForField(md.Fields().ByNumber(1))
			vt := goTypeForField(md.Fields().ByNumber(2))
			t = reflect.MapOf(kt, vt)
		}
	}

	// Locally cache the message type for the given name.
	if t != nil {
		v, _ := messageTypeCache.LoadOrStore(s, t)
		return v.(reflect.Type)
	}
	return nil
}

func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
	switch k := fd.Kind(); k {
	case protoreflect.EnumKind:
		if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
			return enumGoType(et)
		}
		return reflect.TypeOf(protoreflect.EnumNumber(0))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
			return messageGoType(mt)
		}
		return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
	default:
		return reflect.TypeOf(fd.Default().Interface())
	}
}

func enumGoType(et protoreflect.EnumType) reflect.Type {
	return reflect.TypeOf(et.New(0))
}

func messageGoType(mt protoreflect.MessageType) reflect.Type {
	return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}

// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
	if m == nil {
		return ""
	}
	if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
		return m.XXX_MessageName()
	}
	return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}

// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
	if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
		panic(err)
	}
}

type extensionsByNumber = map[int32]*ExtensionDesc

var extensionCache sync.Map // map[messageName]extensionsByNumber

// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
	// Check whether the cache is stale. If the number of extensions for
	// the given message differs, then it means that some extensions were
	// recently registered upstream that we do not know about.
	s := MessageName(m)
	v, _ := extensionCache.Load(s)
	xs, _ := v.(extensionsByNumber)
	if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
		return xs // cache is up-to-date
	}

	// Cache is stale, re-compute the extensions map.
	xs = make(extensionsByNumber)
	protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
		if xd, ok := xt.(*ExtensionDesc); ok {
			xs[int32(xt.TypeDescriptor().Number())] = xd
		} else {
			// TODO: This implies that the protoreflect.ExtensionType is a
			// custom type not generated by protoc-gen-go. We could try and
			// convert the type to an ExtensionDesc.
		}
		return true
	})
	extensionCache.Store(s, xs)
	return xs
}
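A hedged sketch contrasting the deprecated lookup shown above with its v2 replacement (not part of the vendored diff); the message name "my.pkg.Example" is hypothetical and assumed to be registered.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

func main() {
	// Deprecated path: the Go type (pointer to struct) for a named message, or nil.
	t := proto.MessageType("my.pkg.Example")
	fmt.Println(t)

	// Preferred path: query the v2 global type registry directly.
	mt, err := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName("my.pkg.Example"))
	if err == nil {
		fmt.Println(mt.Descriptor().FullName())
	}
}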
2776
vendor/github.com/golang/protobuf/proto/table_marshal.go
generated
vendored
2776
vendor/github.com/golang/protobuf/proto/table_marshal.go
generated
vendored
File diff suppressed because it is too large
Load Diff
654
vendor/github.com/golang/protobuf/proto/table_merge.go
generated
vendored
654
vendor/github.com/golang/protobuf/proto/table_merge.go
generated
vendored
@@ -1,654 +0,0 @@
File diff suppressed because it is too large
Load Diff
2053
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
2053
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
File diff suppressed because it is too large
Load Diff
845
vendor/github.com/golang/protobuf/proto/text.go
generated
vendored
845
vendor/github.com/golang/protobuf/proto/text.go
generated
vendored
@@ -1,845 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
// Functions for writing the text protocol buffer format.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"encoding"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
newline = []byte("\n")
|
|
||||||
spaces = []byte(" ")
|
|
||||||
endBraceNewline = []byte("}\n")
|
|
||||||
backslashN = []byte{'\\', 'n'}
|
|
||||||
backslashR = []byte{'\\', 'r'}
|
|
||||||
backslashT = []byte{'\\', 't'}
|
|
||||||
backslashDQ = []byte{'\\', '"'}
|
|
||||||
backslashBS = []byte{'\\', '\\'}
|
|
||||||
posInf = []byte("inf")
|
|
||||||
negInf = []byte("-inf")
|
|
||||||
nan = []byte("nan")
|
|
||||||
)
|
|
||||||
|
|
||||||
type writer interface {
|
|
||||||
io.Writer
|
|
||||||
WriteByte(byte) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// textWriter is an io.Writer that tracks its indentation level.
|
|
||||||
type textWriter struct {
|
|
||||||
ind int
|
|
||||||
complete bool // if the current position is a complete line
|
|
||||||
compact bool // whether to write out as a one-liner
|
|
||||||
w writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
|
||||||
if !strings.Contains(s, "\n") {
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
w.complete = false
|
|
||||||
return io.WriteString(w.w, s)
|
|
||||||
}
|
|
||||||
// WriteString is typically called without newlines, so this
|
|
||||||
// codepath and its copy are rare. We copy to avoid
|
|
||||||
// duplicating all of Write's logic here.
|
|
||||||
return w.Write([]byte(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
|
||||||
newlines := bytes.Count(p, newline)
|
|
||||||
if newlines == 0 {
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
n, err = w.w.Write(p)
|
|
||||||
w.complete = false
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
frags := bytes.SplitN(p, newline, newlines+1)
|
|
||||||
if w.compact {
|
|
||||||
for i, frag := range frags {
|
|
||||||
if i > 0 {
|
|
||||||
if err := w.w.WriteByte(' '); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
nn, err := w.w.Write(frag)
|
|
||||||
n += nn
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, frag := range frags {
|
|
||||||
if w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
nn, err := w.w.Write(frag)
|
|
||||||
n += nn
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if i+1 < len(frags) {
|
|
||||||
if err := w.w.WriteByte('\n'); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.complete = len(frags[len(frags)-1]) == 0
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) WriteByte(c byte) error {
|
|
||||||
if w.compact && c == '\n' {
|
|
||||||
c = ' '
|
|
||||||
}
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
err := w.w.WriteByte(c)
|
|
||||||
w.complete = c == '\n'
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) indent() { w.ind++ }
|
|
||||||
|
|
||||||
func (w *textWriter) unindent() {
|
|
||||||
if w.ind == 0 {
|
|
||||||
log.Print("proto: textWriter unindented too far")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
w.ind--
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeName(w *textWriter, props *Properties) error {
|
|
||||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if props.Wire != "group" {
|
|
||||||
return w.WriteByte(':')
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func requiresQuotes(u string) bool {
|
|
||||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
|
||||||
for _, ch := range u {
|
|
||||||
switch {
|
|
||||||
case ch == '.' || ch == '/' || ch == '_':
|
|
||||||
continue
|
|
||||||
case '0' <= ch && ch <= '9':
|
|
||||||
continue
|
|
||||||
case 'A' <= ch && ch <= 'Z':
|
|
||||||
continue
|
|
||||||
case 'a' <= ch && ch <= 'z':
|
|
||||||
continue
|
|
||||||
default:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
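A small standalone sketch of the rule above (the helper name and the sample URLs are illustrative, not part of this file): only letters, digits, '.', '/' and '_' may appear unquoted in an expanded Any type URL.

package main

import "fmt"

// needsQuotes mirrors the switch above: anything outside
// [0-9A-Za-z], '.', '/' and '_' forces the URL to be quoted.
func needsQuotes(u string) bool {
    for _, ch := range u {
        switch {
        case ch == '.' || ch == '/' || ch == '_':
        case '0' <= ch && ch <= '9':
        case 'A' <= ch && ch <= 'Z':
        case 'a' <= ch && ch <= 'z':
        default:
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(needsQuotes("type.googleapis.com/google.protobuf.Duration")) // false
    fmt.Println(needsQuotes("example.com/v1?x=1"))                           // true: '?' and '=' need quoting
}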
|
|
||||||
|
|
||||||
// isAny reports whether sv is a google.protobuf.Any message
|
|
||||||
func isAny(sv reflect.Value) bool {
|
|
||||||
type wkt interface {
|
|
||||||
XXX_WellKnownType() string
|
|
||||||
}
|
|
||||||
t, ok := sv.Addr().Interface().(wkt)
|
|
||||||
return ok && t.XXX_WellKnownType() == "Any"
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
|
||||||
//
|
|
||||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
|
||||||
// required messages are not linked in).
|
|
||||||
//
|
|
||||||
// It returns (true, error) when sv was written in expanded format or an error
|
|
||||||
// was encountered.
|
|
||||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
|
||||||
turl := sv.FieldByName("TypeUrl")
|
|
||||||
val := sv.FieldByName("Value")
|
|
||||||
if !turl.IsValid() || !val.IsValid() {
|
|
||||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
|
||||||
}
|
|
||||||
|
|
||||||
b, ok := val.Interface().([]byte)
|
|
||||||
if !ok {
|
|
||||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(turl.String(), "/")
|
|
||||||
mt := MessageType(parts[len(parts)-1])
|
|
||||||
if mt == nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
m := reflect.New(mt.Elem())
|
|
||||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
w.Write([]byte("["))
|
|
||||||
u := turl.String()
|
|
||||||
if requiresQuotes(u) {
|
|
||||||
writeString(w, u)
|
|
||||||
} else {
|
|
||||||
w.Write([]byte(u))
|
|
||||||
}
|
|
||||||
if w.compact {
|
|
||||||
w.Write([]byte("]:<"))
|
|
||||||
} else {
|
|
||||||
w.Write([]byte("]: <\n"))
|
|
||||||
w.ind++
|
|
||||||
}
|
|
||||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
if w.compact {
|
|
||||||
w.Write([]byte("> "))
|
|
||||||
} else {
|
|
||||||
w.ind--
|
|
||||||
w.Write([]byte(">\n"))
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
}
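A hedged usage sketch of the expanded-Any behaviour above, using well-known types from google.golang.org/protobuf; the sample value and the exact indentation of the output are assumptions, not taken from this commit.

package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/anypb"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    msg, err := anypb.New(durationpb.New(3 * time.Second))
    if err != nil {
        panic(err)
    }
    expand := proto.TextMarshaler{ExpandAny: true}
    fmt.Print(expand.Text(msg))
    // With ExpandAny set, the payload is unpacked and printed roughly as:
    //   [type.googleapis.com/google.protobuf.Duration]: <
    //     seconds: 3
    //   >
    // Without it, only the raw type_url and value fields are written.
}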
|
|
||||||
|
|
||||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|
||||||
if tm.ExpandAny && isAny(sv) {
|
|
||||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
st := sv.Type()
|
|
||||||
sprops := GetProperties(st)
|
|
||||||
for i := 0; i < sv.NumField(); i++ {
|
|
||||||
fv := sv.Field(i)
|
|
||||||
props := sprops.Prop[i]
|
|
||||||
name := st.Field(i).Name
|
|
||||||
|
|
||||||
if name == "XXX_NoUnkeyedLiteral" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(name, "XXX_") {
|
|
||||||
// There are two XXX_ fields:
|
|
||||||
// XXX_unrecognized []byte
|
|
||||||
// XXX_extensions map[int32]proto.Extension
|
|
||||||
// The first is handled here;
|
|
||||||
// the second is handled at the bottom of this function.
|
|
||||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
|
||||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
|
||||||
// Field not filled in. This could be an optional field or
|
|
||||||
// a required field that wasn't filled in. Either way, there
|
|
||||||
// isn't anything we can show for it.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
|
||||||
// Repeated field that is empty, or a bytes field that is unused.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
|
||||||
// Repeated field.
|
|
||||||
for j := 0; j < fv.Len(); j++ {
|
|
||||||
if err := writeName(w, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
v := fv.Index(j)
|
|
||||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
|
||||||
// A nil message in a repeated field is not valid,
|
|
||||||
// but we can handle that more gracefully than panicking.
|
|
||||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := tm.writeAny(w, v, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv.Kind() == reflect.Map {
|
|
||||||
// Map fields are rendered as a repeated struct with key/value fields.
|
|
||||||
keys := fv.MapKeys()
|
|
||||||
sort.Sort(mapKeys(keys))
|
|
||||||
for _, key := range keys {
|
|
||||||
val := fv.MapIndex(key)
|
|
||||||
if err := writeName(w, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// open struct
|
|
||||||
if err := w.WriteByte('<'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.indent()
|
|
||||||
// key
|
|
||||||
if _, err := w.WriteString("key:"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// nil values aren't legal, but we can avoid panicking because of them.
|
|
||||||
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
|
||||||
// value
|
|
||||||
if _, err := w.WriteString("value:"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// close struct
|
|
||||||
w.unindent()
|
|
||||||
if err := w.WriteByte('>'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
|
||||||
// empty bytes field
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
|
||||||
// proto3 non-repeated scalar field; skip if zero value
|
|
||||||
if isProto3Zero(fv) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fv.Kind() == reflect.Interface {
|
|
||||||
// Check if it is a oneof.
|
|
||||||
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
|
||||||
// fv is nil, or holds a pointer to generated struct.
|
|
||||||
// That generated struct has exactly one field,
|
|
||||||
// which has a protobuf struct tag.
|
|
||||||
if fv.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
inner := fv.Elem().Elem() // interface -> *T -> T
|
|
||||||
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
|
||||||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
|
||||||
props.Parse(tag)
|
|
||||||
// Write the value in the oneof, not the oneof itself.
|
|
||||||
fv = inner.Field(0)
|
|
||||||
|
|
||||||
// Special case to cope with malformed messages gracefully:
|
|
||||||
// If the value in the oneof is a nil pointer, don't panic
|
|
||||||
// in writeAny.
|
|
||||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
|
||||||
// Use errors.New so writeAny won't render quotes.
|
|
||||||
msg := errors.New("/* nil */")
|
|
||||||
fv = reflect.ValueOf(&msg).Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := writeName(w, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enums have a String method, so writeAny will work fine.
|
|
||||||
if err := tm.writeAny(w, fv, props); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extensions (the XXX_extensions field).
|
|
||||||
pv := sv.Addr()
|
|
||||||
if _, err := extendable(pv.Interface()); err == nil {
|
|
||||||
if err := tm.writeExtensions(w, pv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
|
||||||
|
|
||||||
// writeAny writes an arbitrary field.
|
|
||||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
|
||||||
v = reflect.Indirect(v)
|
|
||||||
|
|
||||||
// Floats have special cases.
|
|
||||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
|
||||||
x := v.Float()
|
|
||||||
var b []byte
|
|
||||||
switch {
|
|
||||||
case math.IsInf(x, 1):
|
|
||||||
b = posInf
|
|
||||||
case math.IsInf(x, -1):
|
|
||||||
b = negInf
|
|
||||||
case math.IsNaN(x):
|
|
||||||
b = nan
|
|
||||||
}
|
|
||||||
if b != nil {
|
|
||||||
_, err := w.Write(b)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Other values are handled below.
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't attempt to serialise every possible value type; only those
|
|
||||||
// that can occur in protocol buffers.
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Slice:
|
|
||||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
|
||||||
if err := writeString(w, string(v.Bytes())); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case reflect.String:
|
|
||||||
if err := writeString(w, v.String()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
// Required/optional group/message.
|
|
||||||
var bra, ket byte = '<', '>'
|
|
||||||
if props != nil && props.Wire == "group" {
|
|
||||||
bra, ket = '{', '}'
|
|
||||||
}
|
|
||||||
if err := w.WriteByte(bra); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.indent()
|
|
||||||
if v.CanAddr() {
|
|
||||||
// Calling v.Interface on a struct causes the reflect package to
|
|
||||||
// copy the entire struct. This is racy with the new Marshaler
|
|
||||||
// since we atomically update the XXX_sizecache.
|
|
||||||
//
|
|
||||||
// Thus, we retrieve a pointer to the struct if possible to avoid
|
|
||||||
// a race since v.Interface on the pointer doesn't copy the struct.
|
|
||||||
//
|
|
||||||
// If v is not addressable, then we are not worried about a race
|
|
||||||
// since it implies that the binary Marshaler cannot possibly be
|
|
||||||
// mutating this value.
|
|
||||||
v = v.Addr()
|
|
||||||
}
|
|
||||||
if v.Type().Implements(textMarshalerType) {
|
|
||||||
text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err = w.Write(text); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
if err := tm.writeStruct(w, v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.unindent()
|
|
||||||
if err := w.WriteByte(ket); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
_, err := fmt.Fprint(w, v.Interface())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// equivalent to C's isprint.
|
|
||||||
func isprint(c byte) bool {
|
|
||||||
return c >= 0x20 && c < 0x7f
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeString writes a string in the protocol buffer text format.
|
|
||||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
|
||||||
// we treat the string as a byte sequence, and we use octal escapes.
|
|
||||||
// These differences are to maintain interoperability with the other
|
|
||||||
// languages' implementations of the text format.
|
|
||||||
func writeString(w *textWriter, s string) error {
|
|
||||||
// use WriteByte here to get any needed indent
|
|
||||||
if err := w.WriteByte('"'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Loop over the bytes, not the runes.
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
var err error
|
|
||||||
// Divergence from C++: we don't escape apostrophes.
|
|
||||||
// There's no need to escape them, and the C++ parser
|
|
||||||
// copes with a naked apostrophe.
|
|
||||||
switch c := s[i]; c {
|
|
||||||
case '\n':
|
|
||||||
_, err = w.w.Write(backslashN)
|
|
||||||
case '\r':
|
|
||||||
_, err = w.w.Write(backslashR)
|
|
||||||
case '\t':
|
|
||||||
_, err = w.w.Write(backslashT)
|
|
||||||
case '"':
|
|
||||||
_, err = w.w.Write(backslashDQ)
|
|
||||||
case '\\':
|
|
||||||
_, err = w.w.Write(backslashBS)
|
|
||||||
default:
|
|
||||||
if isprint(c) {
|
|
||||||
err = w.w.WriteByte(c)
|
|
||||||
} else {
|
|
||||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return w.WriteByte('"')
|
|
||||||
}
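A rough, self-contained re-implementation of the escaping rules above, shown only to illustrate that the string is handled byte-wise and that non-printable bytes get three-digit octal escapes; the function name is made up.

package main

import "fmt"

// quoteTextProto escapes a string roughly the way writeString does:
// byte by byte, with octal escapes outside the printable ASCII range.
func quoteTextProto(s string) string {
    out := []byte{'"'}
    for i := 0; i < len(s); i++ { // bytes, not runes
        switch c := s[i]; c {
        case '\n':
            out = append(out, `\n`...)
        case '\r':
            out = append(out, `\r`...)
        case '\t':
            out = append(out, `\t`...)
        case '"':
            out = append(out, `\"`...)
        case '\\':
            out = append(out, `\\`...)
        default:
            if c >= 0x20 && c < 0x7f { // isprint
                out = append(out, c)
            } else {
                out = append(out, fmt.Sprintf(`\%03o`, c)...)
            }
        }
    }
    return string(append(out, '"'))
}

func main() {
    fmt.Println(quoteTextProto("héllo\n")) // "h\303\251llo\n" (each UTF-8 byte escaped individually)
}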
|
|
||||||
|
|
||||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
|
||||||
if !w.compact {
|
|
||||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b := NewBuffer(data)
|
|
||||||
for b.index < len(b.buf) {
|
|
||||||
x, err := b.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
wire, tag := x&7, x>>3
|
|
||||||
if wire == WireEndGroup {
|
|
||||||
w.unindent()
|
|
||||||
if _, err := w.Write(endBraceNewline); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if wire != WireStartGroup {
|
|
||||||
if err := w.WriteByte(':'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !w.compact || wire == WireStartGroup {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
switch wire {
|
|
||||||
case WireBytes:
|
|
||||||
buf, e := b.DecodeRawBytes(false)
|
|
||||||
if e == nil {
|
|
||||||
_, err = fmt.Fprintf(w, "%q", buf)
|
|
||||||
} else {
|
|
||||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
|
||||||
}
|
|
||||||
case WireFixed32:
|
|
||||||
x, err = b.DecodeFixed32()
|
|
||||||
err = writeUnknownInt(w, x, err)
|
|
||||||
case WireFixed64:
|
|
||||||
x, err = b.DecodeFixed64()
|
|
||||||
err = writeUnknownInt(w, x, err)
|
|
||||||
case WireStartGroup:
|
|
||||||
err = w.WriteByte('{')
|
|
||||||
w.indent()
|
|
||||||
case WireVarint:
|
|
||||||
x, err = b.DecodeVarint()
|
|
||||||
err = writeUnknownInt(w, x, err)
|
|
||||||
default:
|
|
||||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
|
||||||
if err == nil {
|
|
||||||
_, err = fmt.Fprint(w, x)
|
|
||||||
} else {
|
|
||||||
_, err = fmt.Fprintf(w, "/* %v */", err)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type int32Slice []int32
|
|
||||||
|
|
||||||
func (s int32Slice) Len() int { return len(s) }
|
|
||||||
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
|
||||||
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|
||||||
|
|
||||||
// writeExtensions writes all the extensions in pv.
|
|
||||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
|
||||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
|
||||||
emap := extensionMaps[pv.Type().Elem()]
|
|
||||||
ep, _ := extendable(pv.Interface())
|
|
||||||
|
|
||||||
// Order the extensions by ID.
|
|
||||||
// This isn't strictly necessary, but it will give us
|
|
||||||
// canonical output, which will also make testing easier.
|
|
||||||
m, mu := ep.extensionsRead()
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
ids := make([]int32, 0, len(m))
|
|
||||||
for id := range m {
|
|
||||||
ids = append(ids, id)
|
|
||||||
}
|
|
||||||
sort.Sort(int32Slice(ids))
|
|
||||||
mu.Unlock()
|
|
||||||
|
|
||||||
for _, extNum := range ids {
|
|
||||||
ext := m[extNum]
|
|
||||||
var desc *ExtensionDesc
|
|
||||||
if emap != nil {
|
|
||||||
desc = emap[extNum]
|
|
||||||
}
|
|
||||||
if desc == nil {
|
|
||||||
// Unknown extension.
|
|
||||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pb, err := GetExtension(ep, desc)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed getting extension: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Repeated extensions will appear as a slice.
|
|
||||||
if !desc.repeated() {
|
|
||||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
v := reflect.ValueOf(pb)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
|
||||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !w.compact {
|
|
||||||
if err := w.WriteByte(' '); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeIndent() {
|
|
||||||
if !w.complete {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
remain := w.ind * 2
|
|
||||||
for remain > 0 {
|
|
||||||
n := remain
|
|
||||||
if n > len(spaces) {
|
|
||||||
n = len(spaces)
|
|
||||||
}
|
|
||||||
w.w.Write(spaces[:n])
|
|
||||||
remain -= n
|
|
||||||
}
|
|
||||||
w.complete = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// TextMarshaler is a configurable text format marshaler.
|
|
||||||
type TextMarshaler struct {
|
|
||||||
Compact bool // use compact text format (one line).
|
|
||||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal writes a given protocol buffer in text format.
|
|
||||||
// The only errors returned are from w.
|
|
||||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|
||||||
val := reflect.ValueOf(pb)
|
|
||||||
if pb == nil || val.IsNil() {
|
|
||||||
w.Write([]byte("<nil>"))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var bw *bufio.Writer
|
|
||||||
ww, ok := w.(writer)
|
|
||||||
if !ok {
|
|
||||||
bw = bufio.NewWriter(w)
|
|
||||||
ww = bw
|
|
||||||
}
|
|
||||||
aw := &textWriter{
|
|
||||||
w: ww,
|
|
||||||
complete: true,
|
|
||||||
compact: tm.Compact,
|
|
||||||
}
|
|
||||||
|
|
||||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
|
||||||
text, err := etm.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err = aw.Write(text); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if bw != nil {
|
|
||||||
return bw.Flush()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Dereference the received pointer so we don't have outer < and >.
|
|
||||||
v := reflect.Indirect(val)
|
|
||||||
if err := tm.writeStruct(aw, v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if bw != nil {
|
|
||||||
return bw.Flush()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Text is the same as Marshal, but returns the string directly.
|
|
||||||
func (tm *TextMarshaler) Text(pb Message) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
tm.Marshal(&buf, pb)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
defaultTextMarshaler = TextMarshaler{}
|
|
||||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: consider removing some of the Marshal functions below.
|
|
||||||
|
|
||||||
// MarshalText writes a given protocol buffer in text format.
|
|
||||||
// The only errors returned are from w.
|
|
||||||
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
|
|
||||||
|
|
||||||
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
|
||||||
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
|
|
||||||
|
|
||||||
// CompactText writes a given protocol buffer in compact text format (one line).
|
|
||||||
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
|
|
||||||
|
|
||||||
// CompactTextString is the same as CompactText, but returns the string directly.
|
|
||||||
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
801
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
Normal file
@@ -0,0 +1,801 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
    "encoding"
    "errors"
    "fmt"
    "reflect"
    "strconv"
    "strings"
    "unicode/utf8"

    "google.golang.org/protobuf/encoding/prototext"
    protoV2 "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"
)

const wrapTextUnmarshalV2 = false
|
||||||
|
|
||||||
|
// ParseError is returned by UnmarshalText.
|
||||||
|
type ParseError struct {
|
||||||
|
Message string
|
||||||
|
|
||||||
|
// Deprecated: Do not use.
|
||||||
|
Line, Offset int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ParseError) Error() string {
|
||||||
|
if wrapTextUnmarshalV2 {
|
||||||
|
return e.Message
|
||||||
|
}
|
||||||
|
if e.Line == 1 {
|
||||||
|
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText parses a proto text formatted string into m.
|
||||||
|
func UnmarshalText(s string, m Message) error {
|
||||||
|
if u, ok := m.(encoding.TextUnmarshaler); ok {
|
||||||
|
return u.UnmarshalText([]byte(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Reset()
|
||||||
|
mi := MessageV2(m)
|
||||||
|
|
||||||
|
if wrapTextUnmarshalV2 {
|
||||||
|
err := prototext.UnmarshalOptions{
|
||||||
|
AllowPartial: true,
|
||||||
|
}.Unmarshal([]byte(s), mi)
|
||||||
|
if err != nil {
|
||||||
|
return &ParseError{Message: err.Error()}
|
||||||
|
}
|
||||||
|
return checkRequiredNotSet(mi)
|
||||||
|
} else {
|
||||||
|
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return checkRequiredNotSet(mi)
|
||||||
|
}
|
||||||
|
}
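A minimal usage sketch for UnmarshalText; the well-known Duration message is used purely for illustration, any generated message type would do.

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    d := &durationpb.Duration{}
    if err := proto.UnmarshalText(`seconds: 3 nanos: 250000000`, d); err != nil {
        fmt.Println("parse error:", err) // a *ParseError such as "line 1.N: ..."
        return
    }
    fmt.Println(d.GetSeconds(), d.GetNanos()) // 3 250000000
}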
|
||||||
|
|
||||||
|
type textParser struct {
|
||||||
|
s string // remaining input
|
||||||
|
done bool // whether the parsing is finished (success or error)
|
||||||
|
backed bool // whether back() was called
|
||||||
|
offset, line int
|
||||||
|
cur token
|
||||||
|
}
|
||||||
|
|
||||||
|
type token struct {
|
||||||
|
value string
|
||||||
|
err *ParseError
|
||||||
|
line int // line number
|
||||||
|
offset int // byte number from start of input, not start of line
|
||||||
|
unquoted string // the unquoted version of value, if it was a quoted string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTextParser(s string) *textParser {
|
||||||
|
p := new(textParser)
|
||||||
|
p.s = s
|
||||||
|
p.line = 1
|
||||||
|
p.cur.line = 1
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
||||||
|
md := m.Descriptor()
|
||||||
|
fds := md.Fields()
|
||||||
|
|
||||||
|
// A struct is a sequence of "name: value", terminated by one of
|
||||||
|
// '>' or '}', or the end of the input. A name may also be
|
||||||
|
// "[extension]" or "[type/url]".
|
||||||
|
//
|
||||||
|
// The whole struct can also be an expanded Any message, like:
|
||||||
|
// [type/url] < ... struct contents ... >
|
||||||
|
seen := make(map[protoreflect.FieldNumber]bool)
|
||||||
|
for {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value == terminator {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if tok.value == "[" {
|
||||||
|
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is a normal, non-extension field.
|
||||||
|
name := protoreflect.Name(tok.value)
|
||||||
|
fd := fds.ByName(name)
|
||||||
|
switch {
|
||||||
|
case fd == nil:
|
||||||
|
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
||||||
|
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
||||||
|
fd = gd
|
||||||
|
}
|
||||||
|
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
||||||
|
fd = nil
|
||||||
|
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
||||||
|
fd = nil
|
||||||
|
}
|
||||||
|
if fd == nil {
|
||||||
|
typeName := string(md.FullName())
|
||||||
|
if m, ok := m.Interface().(Message); ok {
|
||||||
|
t := reflect.TypeOf(m)
|
||||||
|
if t.Kind() == reflect.Ptr {
|
||||||
|
typeName = t.Elem().String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p.errorf("unknown field name %q in %v", name, typeName)
|
||||||
|
}
|
||||||
|
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
||||||
|
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
||||||
|
}
|
||||||
|
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
||||||
|
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
||||||
|
}
|
||||||
|
seen[fd.Number()] = true
|
||||||
|
|
||||||
|
// Consume any colon.
|
||||||
|
if err := p.checkForColon(fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse into the field.
|
||||||
|
v := m.Get(fd)
|
||||||
|
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||||
|
v = m.Mutable(fd)
|
||||||
|
}
|
||||||
|
if v, err = p.unmarshalValue(v, fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, v)
|
||||||
|
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
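As the comment in unmarshalMessage notes, nested messages may be delimited by either <...> or {...}, and the colon before an opening brace is optional. A sketch using google.protobuf.Struct (chosen only because it is readily available) shows two equivalent spellings:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/structpb"
)

func main() {
    // Both delimiter styles, with or without a colon after "fields", parse identically.
    inputs := []string{
        `fields: < key: "x" value: < number_value: 1 > >`,
        `fields { key: "x" value { number_value: 1 } }`,
    }
    for _, in := range inputs {
        s := &structpb.Struct{}
        if err := proto.UnmarshalText(in, s); err != nil {
            panic(err)
        }
        fmt.Println(s.GetFields()["x"].GetNumberValue()) // 1
    }
}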
|
||||||
|
|
||||||
|
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
||||||
|
name, err := p.consumeExtensionOrAnyName()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it contains a slash, it's an Any type URL.
|
||||||
|
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
// consume an optional colon
|
||||||
|
if tok.value == ":" {
|
||||||
|
tok = p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
default:
|
||||||
|
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
||||||
|
if err != nil {
|
||||||
|
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
||||||
|
}
|
||||||
|
m2 := mt.New()
|
||||||
|
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b, err := protoV2.Marshal(m2.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
||||||
|
}
|
||||||
|
|
||||||
|
urlFD := m.Descriptor().Fields().ByName("type_url")
|
||||||
|
valFD := m.Descriptor().Fields().ByName("value")
|
||||||
|
if seen[urlFD.Number()] {
|
||||||
|
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
||||||
|
}
|
||||||
|
if seen[valFD.Number()] {
|
||||||
|
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
||||||
|
}
|
||||||
|
m.Set(urlFD, protoreflect.ValueOfString(name))
|
||||||
|
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
||||||
|
seen[urlFD.Number()] = true
|
||||||
|
seen[valFD.Number()] = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
xname := protoreflect.FullName(name)
|
||||||
|
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||||
|
if xt == nil && isMessageSet(m.Descriptor()) {
|
||||||
|
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||||
|
}
|
||||||
|
if xt == nil {
|
||||||
|
return p.errorf("unrecognized extension %q", name)
|
||||||
|
}
|
||||||
|
fd := xt.TypeDescriptor()
|
||||||
|
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||||
|
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.checkForColon(fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
v := m.Get(fd)
|
||||||
|
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||||
|
v = m.Mutable(fd)
|
||||||
|
}
|
||||||
|
v, err = p.unmarshalValue(v, fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.Set(fd, v)
|
||||||
|
return p.consumeOptionalSeparator()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return v, tok.err
|
||||||
|
}
|
||||||
|
if tok.value == "" {
|
||||||
|
return v, p.errorf("unexpected EOF")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case fd.IsList():
|
||||||
|
lv := v.List()
|
||||||
|
var err error
|
||||||
|
if tok.value == "[" {
|
||||||
|
// Repeated field with list notation, like [1,2,3].
|
||||||
|
for {
|
||||||
|
vv := lv.NewElement()
|
||||||
|
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||||
|
if err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
lv.Append(vv)
|
||||||
|
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return v, tok.err
|
||||||
|
}
|
||||||
|
if tok.value == "]" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if tok.value != "," {
|
||||||
|
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// One value of the repeated field.
|
||||||
|
p.back()
|
||||||
|
vv := lv.NewElement()
|
||||||
|
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||||
|
if err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
lv.Append(vv)
|
||||||
|
return v, nil
|
||||||
|
case fd.IsMap():
|
||||||
|
// The map entry should be this sequence of tokens:
|
||||||
|
// < key : KEY value : VALUE >
|
||||||
|
// However, implementations may omit key or value, and technically
|
||||||
|
// we should support them in any order.
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
default:
|
||||||
|
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
keyFD := fd.MapKey()
|
||||||
|
valFD := fd.MapValue()
|
||||||
|
|
||||||
|
mv := v.Map()
|
||||||
|
kv := keyFD.Default()
|
||||||
|
vv := mv.NewValue()
|
||||||
|
for {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return v, tok.err
|
||||||
|
}
|
||||||
|
if tok.value == terminator {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
switch tok.value {
|
||||||
|
case "key":
|
||||||
|
if err := p.consumeToken(":"); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
case "value":
|
||||||
|
if err := p.checkForColon(valFD); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
p.back()
|
||||||
|
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mv.Set(kv.MapKey(), vv)
|
||||||
|
return v, nil
|
||||||
|
default:
|
||||||
|
p.back()
|
||||||
|
return p.unmarshalSingularValue(v, fd)
|
||||||
|
}
|
||||||
|
}
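The list-notation branch above means a repeated field can be written either element by element or as a bracketed list. A small sketch, using FieldMask only because it has a convenient repeated string field:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
    a := &fieldmaskpb.FieldMask{}
    b := &fieldmaskpb.FieldMask{}
    _ = proto.UnmarshalText(`paths: "user.name" paths: "user.email"`, a)
    _ = proto.UnmarshalText(`paths: ["user.name", "user.email"]`, b)
    fmt.Println(a.GetPaths()) // [user.name user.email]
    fmt.Println(b.GetPaths()) // [user.name user.email]
}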
|
||||||
|
|
||||||
|
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return v, tok.err
|
||||||
|
}
|
||||||
|
if tok.value == "" {
|
||||||
|
return v, p.errorf("unexpected EOF")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch fd.Kind() {
|
||||||
|
case protoreflect.BoolKind:
|
||||||
|
switch tok.value {
|
||||||
|
case "true", "1", "t", "True":
|
||||||
|
return protoreflect.ValueOfBool(true), nil
|
||||||
|
case "false", "0", "f", "False":
|
||||||
|
return protoreflect.ValueOfBool(false), nil
|
||||||
|
}
|
||||||
|
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||||
|
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfInt32(int32(x)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The C++ parser accepts large positive hex numbers that use
|
||||||
|
// two's complement arithmetic to represent negative numbers.
|
||||||
|
// This feature is here for backwards compatibility with C++.
|
||||||
|
if strings.HasPrefix(tok.value, "0x") {
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||||
|
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||||
|
return protoreflect.ValueOfInt64(int64(x)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The C++ parser accepts large positive hex numbers that use
|
||||||
|
// two's complement arithmetic to represent negative numbers.
|
||||||
|
// This feature is here for backwards compatibility with C++.
|
||||||
|
if strings.HasPrefix(tok.value, "0x") {
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||||
|
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfUint32(uint32(x)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||||
|
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||||
|
return protoreflect.ValueOfUint64(uint64(x)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.FloatKind:
|
||||||
|
// Ignore 'f' for compatibility with output generated by C++,
|
||||||
|
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||||
|
v := tok.value
|
||||||
|
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||||
|
v = v[:len(v)-len("f")]
|
||||||
|
}
|
||||||
|
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfFloat32(float32(x)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.DoubleKind:
|
||||||
|
// Ignore 'f' for compatibility with output generated by C++,
|
||||||
|
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||||
|
v := tok.value
|
||||||
|
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||||
|
v = v[:len(v)-len("f")]
|
||||||
|
}
|
||||||
|
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
||||||
|
return protoreflect.ValueOfFloat64(float64(x)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.StringKind:
|
||||||
|
if isQuote(tok.value[0]) {
|
||||||
|
return protoreflect.ValueOfString(tok.unquoted), nil
|
||||||
|
}
|
||||||
|
case protoreflect.BytesKind:
|
||||||
|
if isQuote(tok.value[0]) {
|
||||||
|
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
||||||
|
}
|
||||||
|
case protoreflect.EnumKind:
|
||||||
|
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||||
|
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
||||||
|
}
|
||||||
|
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
||||||
|
if vd != nil {
|
||||||
|
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||||
|
}
|
||||||
|
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||||
|
var terminator string
|
||||||
|
switch tok.value {
|
||||||
|
case "{":
|
||||||
|
terminator = "}"
|
||||||
|
case "<":
|
||||||
|
terminator = ">"
|
||||||
|
default:
|
||||||
|
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
|
}
|
||||||
|
err := p.unmarshalMessage(v.Message(), terminator)
|
||||||
|
return v, err
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||||
|
}
|
||||||
|
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
||||||
|
}
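A worked example of the hex fallback above: "0xffffffff" does not fit strconv.ParseInt(s, 0, 32), so the value is parsed as unsigned and its bits re-interpreted, which is how the C++-compatible negative value comes out.

package main

import (
    "fmt"
    "strconv"
)

func main() {
    // ParseInt("0xffffffff", 0, 32) overflows, so the parser falls back to
    // ParseUint and flips the bits exactly as unmarshalSingularValue does.
    x, _ := strconv.ParseUint("0xffffffff", 0, 32)
    fmt.Println(int32(-(int64(^x) + 1))) // -1
}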
|
||||||
|
|
||||||
|
// Consume a ':' from the input stream (if the next token is a colon),
|
||||||
|
// returning an error if a colon is needed but not present.
|
||||||
|
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != ":" {
|
||||||
|
if fd.Message() == nil {
|
||||||
|
return p.errorf("expected ':', found %q", tok.value)
|
||||||
|
}
|
||||||
|
p.back()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
||||||
|
// the following ']'. It returns the name or URL consumed.
|
||||||
|
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return "", tok.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If extension name or type url is quoted, it's a single token.
|
||||||
|
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||||
|
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return name, p.consumeToken("]")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consume everything up to "]"
|
||||||
|
var parts []string
|
||||||
|
for tok.value != "]" {
|
||||||
|
parts = append(parts, tok.value)
|
||||||
|
tok = p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||||
|
}
|
||||||
|
if p.done && tok.value != "]" {
|
||||||
|
return "", p.errorf("unclosed type_url or extension name")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strings.Join(parts, ""), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||||
|
// It is used in unmarshalMessage to provide backward compatibility.
|
||||||
|
func (p *textParser) consumeOptionalSeparator() error {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != ";" && tok.value != "," {
|
||||||
|
p.back()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||||
|
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||||
|
p.cur.err = pe
|
||||||
|
p.done = true
|
||||||
|
return pe
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) skipWhitespace() {
|
||||||
|
i := 0
|
||||||
|
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||||
|
if p.s[i] == '#' {
|
||||||
|
// comment; skip to end of line or input
|
||||||
|
for i < len(p.s) && p.s[i] != '\n' {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i == len(p.s) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if p.s[i] == '\n' {
|
||||||
|
p.line++
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
p.offset += i
|
||||||
|
p.s = p.s[i:len(p.s)]
|
||||||
|
if len(p.s) == 0 {
|
||||||
|
p.done = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textParser) advance() {
|
||||||
|
// Skip whitespace
|
||||||
|
p.skipWhitespace()
|
||||||
|
if p.done {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start of non-whitespace
|
||||||
|
p.cur.err = nil
|
||||||
|
p.cur.offset, p.cur.line = p.offset, p.line
|
||||||
|
p.cur.unquoted = ""
|
||||||
|
switch p.s[0] {
|
||||||
|
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||||
|
// Single symbol
|
||||||
|
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||||
|
case '"', '\'':
|
||||||
|
// Quoted string
|
||||||
|
i := 1
|
||||||
|
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||||
|
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||||
|
// skip escaped char
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||||
|
p.errorf("unmatched quote")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||||
|
if err != nil {
|
||||||
|
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||||
|
p.cur.unquoted = unq
|
||||||
|
default:
|
||||||
|
i := 0
|
||||||
|
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
p.errorf("unexpected byte %#x", p.s[0])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||||
|
}
|
||||||
|
p.offset += len(p.cur.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Back off the parser by one token. Can only be done between calls to next().
|
||||||
|
// It makes the next advance() a no-op.
|
||||||
|
func (p *textParser) back() { p.backed = true }
|
||||||
|
|
||||||
|
// Advances the parser and returns the new current token.
|
||||||
|
func (p *textParser) next() *token {
|
||||||
|
if p.backed || p.done {
|
||||||
|
p.backed = false
|
||||||
|
return &p.cur
|
||||||
|
}
|
||||||
|
p.advance()
|
||||||
|
if p.done {
|
||||||
|
p.cur.value = ""
|
||||||
|
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||||
|
// Look for multiple quoted strings separated by whitespace,
|
||||||
|
// and concatenate them.
|
||||||
|
cat := p.cur
|
||||||
|
for {
|
||||||
|
p.skipWhitespace()
|
||||||
|
if p.done || !isQuote(p.s[0]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
p.advance()
|
||||||
|
if p.cur.err != nil {
|
||||||
|
return &p.cur
|
||||||
|
}
|
||||||
|
cat.value += " " + p.cur.value
|
||||||
|
cat.unquoted += p.cur.unquoted
|
||||||
|
}
|
||||||
|
p.done = false // parser may have seen EOF, but we want to return cat
|
||||||
|
p.cur = cat
|
||||||
|
}
|
||||||
|
return &p.cur
|
||||||
|
}
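The concatenation loop above gives C-style joining of adjacent quoted strings separated only by whitespace; a brief sketch (FieldMask is again just a convenient carrier):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
    m := &fieldmaskpb.FieldMask{}
    if err := proto.UnmarshalText(`paths: "user." "display_name"`, m); err != nil {
        panic(err)
    }
    fmt.Println(m.GetPaths()) // [user.display_name]
}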
|
||||||
|
|
||||||
|
func (p *textParser) consumeToken(s string) error {
|
||||||
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != s {
|
||||||
|
p.back()
|
||||||
|
return p.errorf("expected %q, found %q", s, tok.value)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||||
|
|
||||||
|
func unquoteC(s string, quote rune) (string, error) {
|
||||||
|
// This is based on C++'s tokenizer.cc.
|
||||||
|
// Despite its name, this is *not* parsing C syntax.
|
||||||
|
// For instance, "\0" is an invalid quoted string.
|
||||||
|
|
||||||
|
// Avoid allocation in trivial cases.
|
||||||
|
simple := true
|
||||||
|
for _, r := range s {
|
||||||
|
if r == '\\' || r == quote {
|
||||||
|
simple = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if simple {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, 0, 3*len(s)/2)
|
||||||
|
for len(s) > 0 {
|
||||||
|
r, n := utf8.DecodeRuneInString(s)
|
||||||
|
if r == utf8.RuneError && n == 1 {
|
||||||
|
return "", errBadUTF8
|
||||||
|
}
|
||||||
|
s = s[n:]
|
||||||
|
if r != '\\' {
|
||||||
|
if r < utf8.RuneSelf {
|
||||||
|
buf = append(buf, byte(r))
|
||||||
|
} else {
|
||||||
|
buf = append(buf, string(r)...)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ch, tail, err := unescape(s)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
buf = append(buf, ch...)
|
||||||
|
s = tail
|
||||||
|
}
|
||||||
|
return string(buf), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unescape(s string) (ch string, tail string, err error) {
|
||||||
|
r, n := utf8.DecodeRuneInString(s)
|
||||||
|
if r == utf8.RuneError && n == 1 {
|
||||||
|
return "", "", errBadUTF8
|
||||||
|
}
|
||||||
|
s = s[n:]
|
||||||
|
switch r {
|
||||||
|
case 'a':
|
||||||
|
return "\a", s, nil
|
||||||
|
case 'b':
|
||||||
|
return "\b", s, nil
|
||||||
|
case 'f':
|
||||||
|
return "\f", s, nil
|
||||||
|
case 'n':
|
||||||
|
return "\n", s, nil
|
||||||
|
case 'r':
|
||||||
|
return "\r", s, nil
|
||||||
|
case 't':
|
||||||
|
return "\t", s, nil
|
||||||
|
case 'v':
|
||||||
|
return "\v", s, nil
|
||||||
|
case '?':
|
||||||
|
return "?", s, nil // trigraph workaround
|
||||||
|
case '\'', '"', '\\':
|
||||||
|
return string(r), s, nil
|
||||||
|
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||||
|
if len(s) < 2 {
|
||||||
|
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||||
|
}
|
||||||
|
ss := string(r) + s[:2]
|
||||||
|
s = s[2:]
|
||||||
|
i, err := strconv.ParseUint(ss, 8, 8)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||||
|
}
|
||||||
|
return string([]byte{byte(i)}), s, nil
|
||||||
|
case 'x', 'X', 'u', 'U':
|
||||||
|
var n int
|
||||||
|
switch r {
|
||||||
|
case 'x', 'X':
|
||||||
|
n = 2
|
||||||
|
case 'u':
|
||||||
|
n = 4
|
||||||
|
case 'U':
|
||||||
|
n = 8
|
||||||
|
}
|
||||||
|
if len(s) < n {
|
||||||
|
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||||
|
}
|
||||||
|
ss := s[:n]
|
||||||
|
s = s[n:]
|
||||||
|
i, err := strconv.ParseUint(ss, 16, 64)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||||
|
}
|
||||||
|
if r == 'x' || r == 'X' {
|
||||||
|
return string([]byte{byte(i)}), s, nil
|
||||||
|
}
|
||||||
|
if i > utf8.MaxRune {
|
||||||
|
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||||
|
}
|
||||||
|
return string(i), s, nil
|
||||||
|
}
|
||||||
|
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isIdentOrNumberChar(c byte) bool {
|
||||||
|
switch {
|
||||||
|
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||||
|
return true
|
||||||
|
case '0' <= c && c <= '9':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
switch c {
|
||||||
|
case '-', '+', '.', '_':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isWhitespace(c byte) bool {
|
||||||
|
switch c {
|
||||||
|
case ' ', '\t', '\n', '\r':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isQuote(c byte) bool {
|
||||||
|
switch c {
|
||||||
|
case '"', '\'':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
Normal file
@@ -0,0 +1,560 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
    "bytes"
    "encoding"
    "fmt"
    "io"
    "math"
    "sort"
    "strings"

    "google.golang.org/protobuf/encoding/prototext"
    "google.golang.org/protobuf/encoding/protowire"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"
)

const wrapTextMarshalV2 = false
|
||||||
|
|
||||||
|
// TextMarshaler is a configurable text format marshaler.
|
||||||
|
type TextMarshaler struct {
|
||||||
|
Compact bool // use compact text format (one line)
|
||||||
|
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal writes the proto text format of m to w.
|
||||||
|
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
||||||
|
b, err := tm.marshal(m)
|
||||||
|
if len(b) > 0 {
|
||||||
|
if _, err := w.Write(b); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Text returns a proto text formatted string of m.
|
||||||
|
func (tm *TextMarshaler) Text(m Message) string {
|
||||||
|
b, _ := tm.marshal(m)
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
||||||
|
mr := MessageReflect(m)
|
||||||
|
if mr == nil || !mr.IsValid() {
|
||||||
|
return []byte("<nil>"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if wrapTextMarshalV2 {
|
||||||
|
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||||
|
return m.MarshalText()
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := prototext.MarshalOptions{
|
||||||
|
AllowPartial: true,
|
||||||
|
EmitUnknown: true,
|
||||||
|
}
|
||||||
|
if !tm.Compact {
|
||||||
|
opts.Indent = " "
|
||||||
|
}
|
||||||
|
if !tm.ExpandAny {
|
||||||
|
opts.Resolver = (*protoregistry.Types)(nil)
|
||||||
|
}
|
||||||
|
return opts.Marshal(mr.Interface())
|
||||||
|
} else {
|
||||||
|
w := &textWriter{
|
||||||
|
compact: tm.Compact,
|
||||||
|
expandAny: tm.ExpandAny,
|
||||||
|
complete: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||||
|
b, err := m.MarshalText()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
w.Write(b)
|
||||||
|
return w.buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err := w.writeMessage(mr)
|
||||||
|
return w.buf, err
|
||||||
|
}
|
||||||
|
}
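The wrapTextMarshalV2 branch above is currently disabled, but for reference this is roughly how equivalent output is produced directly with the v2 prototext package; the message value here is an assumption for illustration only.

package main

import (
    "fmt"
    "time"

    "google.golang.org/protobuf/encoding/prototext"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    opts := prototext.MarshalOptions{
        AllowPartial: true,
        EmitUnknown:  true,
        Indent:       "  ", // mirrors the non-Compact setting above
    }
    b, err := opts.Marshal(durationpb.New(90 * time.Second))
    if err != nil {
        panic(err)
    }
    fmt.Print(string(b)) // seconds: 90
}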
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultTextMarshaler = TextMarshaler{}
|
||||||
|
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
// MarshalText writes the proto text format of m to w.
|
||||||
|
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
|
||||||
|
|
||||||
|
// MarshalTextString returns a proto text formatted string of m.
|
||||||
|
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
|
||||||
|
|
||||||
|
// CompactText writes the compact proto text format of m to w.
|
||||||
|
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
|
||||||
|
|
||||||
|
// CompactTextString returns a compact proto text formatted string of m.
|
||||||
|
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|
||||||
|
|
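The helpers above are the legacy v1 entry points for the protobuf text format: the package-level MarshalText, MarshalTextString, CompactText, and CompactTextString all delegate to a TextMarshaler. A minimal usage sketch follows; the generated message type `pb.Person`, its field, and its import path are hypothetical stand-ins, while the proto calls are the API shown in this file (plus proto.String from wrappers.go later in this change).

```go
package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // assumed generated package, not part of this change
)

func main() {
	m := &pb.Person{Name: proto.String("Ada")} // hypothetical message with an optional string field

	fmt.Print(proto.MarshalTextString(m))   // multi-line form, indented
	fmt.Println(proto.CompactTextString(m)) // single-line form

	// The configurable marshaler: compact output that also expands
	// google.protobuf.Any fields whose types are registered.
	tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
	_ = tm.Marshal(os.Stdout, m)
}
```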
var (
	newline         = []byte("\n")
	endBraceNewline = []byte("}\n")
	posInf          = []byte("inf")
	negInf          = []byte("-inf")
	nan             = []byte("nan")
)

// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
	compact   bool // same as TextMarshaler.Compact
	expandAny bool // same as TextMarshaler.ExpandAny
	complete  bool // whether the current position is a complete line
	indent    int  // indentation level; never negative
	buf       []byte
}

func (w *textWriter) Write(p []byte) (n int, _ error) {
	newlines := bytes.Count(p, newline)
	if newlines == 0 {
		if !w.compact && w.complete {
			w.writeIndent()
		}
		w.buf = append(w.buf, p...)
		w.complete = false
		return len(p), nil
	}

	frags := bytes.SplitN(p, newline, newlines+1)
	if w.compact {
		for i, frag := range frags {
			if i > 0 {
				w.buf = append(w.buf, ' ')
				n++
			}
			w.buf = append(w.buf, frag...)
			n += len(frag)
		}
		return n, nil
	}

	for i, frag := range frags {
		if w.complete {
			w.writeIndent()
		}
		w.buf = append(w.buf, frag...)
		n += len(frag)
		if i+1 < len(frags) {
			w.buf = append(w.buf, '\n')
			n++
		}
	}
	w.complete = len(frags[len(frags)-1]) == 0
	return n, nil
}

func (w *textWriter) WriteByte(c byte) error {
	if w.compact && c == '\n' {
		c = ' '
	}
	if !w.compact && w.complete {
		w.writeIndent()
	}
	w.buf = append(w.buf, c)
	w.complete = c == '\n'
	return nil
}

func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
	if !w.compact && w.complete {
		w.writeIndent()
	}
	w.complete = false

	if fd.Kind() != protoreflect.GroupKind {
		w.buf = append(w.buf, fd.Name()...)
		w.WriteByte(':')
	} else {
		// Use message type name for group field name.
		w.buf = append(w.buf, fd.Message().Name()...)
	}

	if !w.compact {
		w.WriteByte(' ')
	}
}

func requiresQuotes(u string) bool {
	// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
	for _, ch := range u {
		switch {
		case ch == '.' || ch == '/' || ch == '_':
			continue
		case '0' <= ch && ch <= '9':
			continue
		case 'A' <= ch && ch <= 'Z':
			continue
		case 'a' <= ch && ch <= 'z':
			continue
		default:
			return true
		}
	}
	return false
}

// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
	md := m.Descriptor()
	fdURL := md.Fields().ByName("type_url")
	fdVal := md.Fields().ByName("value")

	url := m.Get(fdURL).String()
	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
	if err != nil {
		return false, nil
	}

	b := m.Get(fdVal).Bytes()
	m2 := mt.New()
	if err := proto.Unmarshal(b, m2.Interface()); err != nil {
		return false, nil
	}
	w.Write([]byte("["))
	if requiresQuotes(url) {
		w.writeQuotedString(url)
	} else {
		w.Write([]byte(url))
	}
	if w.compact {
		w.Write([]byte("]:<"))
	} else {
		w.Write([]byte("]: <\n"))
		w.indent++
	}
	if err := w.writeMessage(m2); err != nil {
		return true, err
	}
	if w.compact {
		w.Write([]byte("> "))
	} else {
		w.indent--
		w.Write([]byte(">\n"))
	}
	return true, nil
}

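writeProto3Any above is what the ExpandAny option switches on: when the Any's type URL resolves against the global registry, the wrapped payload is decoded and printed inline behind the bracketed URL; otherwise the raw type_url/value fields are printed. A hedged sketch of the difference using the well-known Duration type; the output strings in the comments are approximate, not guaranteed byte-for-byte.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 1})
	if err != nil {
		panic(err)
	}

	// Default marshaler: the Any is printed as its raw fields, roughly
	//   type_url:"type.googleapis.com/google.protobuf.Duration" value:"\010\001"
	fmt.Println(proto.CompactTextString(a))

	// ExpandAny: the type URL goes in brackets and the payload is decoded, roughly
	//   [type.googleapis.com/google.protobuf.Duration]:<seconds:1 >
	tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
	fmt.Println(tm.Text(a))
}
```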
func (w *textWriter) writeMessage(m protoreflect.Message) error {
	md := m.Descriptor()
	if w.expandAny && md.FullName() == "google.protobuf.Any" {
		if canExpand, err := w.writeProto3Any(m); canExpand {
			return err
		}
	}

	fds := md.Fields()
	for i := 0; i < fds.Len(); {
		fd := fds.Get(i)
		if od := fd.ContainingOneof(); od != nil {
			fd = m.WhichOneof(od)
			i += od.Fields().Len()
		} else {
			i++
		}
		if fd == nil || !m.Has(fd) {
			continue
		}

		switch {
		case fd.IsList():
			lv := m.Get(fd).List()
			for j := 0; j < lv.Len(); j++ {
				w.writeName(fd)
				v := lv.Get(j)
				if err := w.writeSingularValue(v, fd); err != nil {
					return err
				}
				w.WriteByte('\n')
			}
		case fd.IsMap():
			kfd := fd.MapKey()
			vfd := fd.MapValue()
			mv := m.Get(fd).Map()

			type entry struct{ key, val protoreflect.Value }
			var entries []entry
			mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
				entries = append(entries, entry{k.Value(), v})
				return true
			})
			sort.Slice(entries, func(i, j int) bool {
				switch kfd.Kind() {
				case protoreflect.BoolKind:
					return !entries[i].key.Bool() && entries[j].key.Bool()
				case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
					return entries[i].key.Int() < entries[j].key.Int()
				case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
					return entries[i].key.Uint() < entries[j].key.Uint()
				case protoreflect.StringKind:
					return entries[i].key.String() < entries[j].key.String()
				default:
					panic("invalid kind")
				}
			})
			for _, entry := range entries {
				w.writeName(fd)
				w.WriteByte('<')
				if !w.compact {
					w.WriteByte('\n')
				}
				w.indent++
				w.writeName(kfd)
				if err := w.writeSingularValue(entry.key, kfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.writeName(vfd)
				if err := w.writeSingularValue(entry.val, vfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.indent--
				w.WriteByte('>')
				w.WriteByte('\n')
			}
		default:
			w.writeName(fd)
			if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
				return err
			}
			w.WriteByte('\n')
		}
	}

	if b := m.GetUnknown(); len(b) > 0 {
		w.writeUnknownFields(b)
	}
	return w.writeExtensions(m)
}

func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	switch fd.Kind() {
	case protoreflect.FloatKind, protoreflect.DoubleKind:
		switch vf := v.Float(); {
		case math.IsInf(vf, +1):
			w.Write(posInf)
		case math.IsInf(vf, -1):
			w.Write(negInf)
		case math.IsNaN(vf):
			w.Write(nan)
		default:
			fmt.Fprint(w, v.Interface())
		}
	case protoreflect.StringKind:
		// NOTE: This does not validate UTF-8 for historical reasons.
		w.writeQuotedString(string(v.String()))
	case protoreflect.BytesKind:
		w.writeQuotedString(string(v.Bytes()))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		var bra, ket byte = '<', '>'
		if fd.Kind() == protoreflect.GroupKind {
			bra, ket = '{', '}'
		}
		w.WriteByte(bra)
		if !w.compact {
			w.WriteByte('\n')
		}
		w.indent++
		m := v.Message()
		if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
			b, err := m2.MarshalText()
			if err != nil {
				return err
			}
			w.Write(b)
		} else {
			w.writeMessage(m)
		}
		w.indent--
		w.WriteByte(ket)
	case protoreflect.EnumKind:
		if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
			fmt.Fprint(w, ev.Name())
		} else {
			fmt.Fprint(w, v.Enum())
		}
	default:
		fmt.Fprint(w, v.Interface())
	}
	return nil
}

// writeQuotedString writes a quoted string in the protocol buffer text format.
func (w *textWriter) writeQuotedString(s string) {
	w.WriteByte('"')
	for i := 0; i < len(s); i++ {
		switch c := s[i]; c {
		case '\n':
			w.buf = append(w.buf, `\n`...)
		case '\r':
			w.buf = append(w.buf, `\r`...)
		case '\t':
			w.buf = append(w.buf, `\t`...)
		case '"':
			w.buf = append(w.buf, `\"`...)
		case '\\':
			w.buf = append(w.buf, `\\`...)
		default:
			if isPrint := c >= 0x20 && c < 0x7f; isPrint {
				w.buf = append(w.buf, c)
			} else {
				w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
			}
		}
	}
	w.WriteByte('"')
}

func (w *textWriter) writeUnknownFields(b []byte) {
	if !w.compact {
		fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
	}

	for len(b) > 0 {
		num, wtyp, n := protowire.ConsumeTag(b)
		if n < 0 {
			return
		}
		b = b[n:]

		if wtyp == protowire.EndGroupType {
			w.indent--
			w.Write(endBraceNewline)
			continue
		}
		fmt.Fprint(w, num)
		if wtyp != protowire.StartGroupType {
			w.WriteByte(':')
		}
		if !w.compact || wtyp == protowire.StartGroupType {
			w.WriteByte(' ')
		}
		switch wtyp {
		case protowire.VarintType:
			v, n := protowire.ConsumeVarint(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed32Type:
			v, n := protowire.ConsumeFixed32(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed64Type:
			v, n := protowire.ConsumeFixed64(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.BytesType:
			v, n := protowire.ConsumeBytes(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprintf(w, "%q", v)
		case protowire.StartGroupType:
			w.WriteByte('{')
			w.indent++
		default:
			fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
		}
		w.WriteByte('\n')
	}
}

// writeExtensions writes all the extensions in m.
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
	md := m.Descriptor()
	if md.ExtensionRanges().Len() == 0 {
		return nil
	}

	type ext struct {
		desc protoreflect.FieldDescriptor
		val  protoreflect.Value
	}
	var exts []ext
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		if fd.IsExtension() {
			exts = append(exts, ext{fd, v})
		}
		return true
	})
	sort.Slice(exts, func(i, j int) bool {
		return exts[i].desc.Number() < exts[j].desc.Number()
	})

	for _, ext := range exts {
		// For message set, use the name of the message as the extension name.
		name := string(ext.desc.FullName())
		if isMessageSet(ext.desc.ContainingMessage()) {
			name = strings.TrimSuffix(name, ".message_set_extension")
		}

		if !ext.desc.IsList() {
			if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
				return err
			}
		} else {
			lv := ext.val.List()
			for i := 0; i < lv.Len(); i++ {
				if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
					return err
				}
			}
		}
	}
	return nil
}

func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	fmt.Fprintf(w, "[%s]:", name)
	if !w.compact {
		w.WriteByte(' ')
	}
	if err := w.writeSingularValue(v, fd); err != nil {
		return err
	}
	w.WriteByte('\n')
	return nil
}

func (w *textWriter) writeIndent() {
	if !w.complete {
		return
	}
	for i := 0; i < w.indent*2; i++ {
		w.buf = append(w.buf, ' ')
	}
	w.complete = false
}
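One behavior worth calling out from writeMessage above: map fields are not printed in Go's randomized iteration order; entries are collected and sorted by key (bool, signed, unsigned, or string comparison) before being written, so text output is deterministic. A small sketch, assuming a hypothetical generated type `pb.Counts` with a `map<string, int32> hits` field; the expected output comment is approximate.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // assumed generated package, not part of this change
)

func main() {
	m := &pb.Counts{Hits: map[string]int32{"b": 2, "a": 1}}

	// Entries come out sorted by key regardless of map iteration order, roughly:
	//   hits:<key:"a" value:1 > hits:<key:"b" value:2 >
	fmt.Println(proto.CompactTextString(m))
}
```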
880
vendor/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
880
vendor/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
@@ -1,880 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
// Functions for parsing the Text protocol buffer format.
|
|
||||||
// TODO: message sets.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Error string emitted when deserializing Any and fields are already set
|
|
||||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
|
||||||
|
|
||||||
type ParseError struct {
|
|
||||||
Message string
|
|
||||||
Line int // 1-based line number
|
|
||||||
Offset int // 0-based byte offset from start of input
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *ParseError) Error() string {
|
|
||||||
if p.Line == 1 {
|
|
||||||
// show offset only for first line
|
|
||||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
type token struct {
|
|
||||||
value string
|
|
||||||
err *ParseError
|
|
||||||
line int // line number
|
|
||||||
offset int // byte number from start of input, not start of line
|
|
||||||
unquoted string // the unquoted version of value, if it was a quoted string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *token) String() string {
|
|
||||||
if t.err == nil {
|
|
||||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("parse error: %v", t.err)
|
|
||||||
}
|
|
||||||
|
|
||||||
type textParser struct {
|
|
||||||
s string // remaining input
|
|
||||||
done bool // whether the parsing is finished (success or error)
|
|
||||||
backed bool // whether back() was called
|
|
||||||
offset, line int
|
|
||||||
cur token
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTextParser(s string) *textParser {
|
|
||||||
p := new(textParser)
|
|
||||||
p.s = s
|
|
||||||
p.line = 1
|
|
||||||
p.cur.line = 1
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
|
||||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
|
||||||
p.cur.err = pe
|
|
||||||
p.done = true
|
|
||||||
return pe
|
|
||||||
}
|
|
||||||
|
|
||||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
|
||||||
func isIdentOrNumberChar(c byte) bool {
|
|
||||||
switch {
|
|
||||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
|
||||||
return true
|
|
||||||
case '0' <= c && c <= '9':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
switch c {
|
|
||||||
case '-', '+', '.', '_':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isWhitespace(c byte) bool {
|
|
||||||
switch c {
|
|
||||||
case ' ', '\t', '\n', '\r':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isQuote(c byte) bool {
|
|
||||||
switch c {
|
|
||||||
case '"', '\'':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) skipWhitespace() {
|
|
||||||
i := 0
|
|
||||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
|
||||||
if p.s[i] == '#' {
|
|
||||||
// comment; skip to end of line or input
|
|
||||||
for i < len(p.s) && p.s[i] != '\n' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i == len(p.s) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.s[i] == '\n' {
|
|
||||||
p.line++
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
p.offset += i
|
|
||||||
p.s = p.s[i:len(p.s)]
|
|
||||||
if len(p.s) == 0 {
|
|
||||||
p.done = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) advance() {
|
|
||||||
// Skip whitespace
|
|
||||||
p.skipWhitespace()
|
|
||||||
if p.done {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start of non-whitespace
|
|
||||||
p.cur.err = nil
|
|
||||||
p.cur.offset, p.cur.line = p.offset, p.line
|
|
||||||
p.cur.unquoted = ""
|
|
||||||
switch p.s[0] {
|
|
||||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
|
||||||
// Single symbol
|
|
||||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
|
||||||
case '"', '\'':
|
|
||||||
// Quoted string
|
|
||||||
i := 1
|
|
||||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
|
||||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
|
||||||
// skip escaped char
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
|
||||||
p.errorf("unmatched quote")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
|
||||||
if err != nil {
|
|
||||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
|
||||||
p.cur.unquoted = unq
|
|
||||||
default:
|
|
||||||
i := 0
|
|
||||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i == 0 {
|
|
||||||
p.errorf("unexpected byte %#x", p.s[0])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
|
||||||
}
|
|
||||||
p.offset += len(p.cur.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
|
||||||
)
|
|
||||||
|
|
||||||
func unquoteC(s string, quote rune) (string, error) {
|
|
||||||
// This is based on C++'s tokenizer.cc.
|
|
||||||
// Despite its name, this is *not* parsing C syntax.
|
|
||||||
// For instance, "\0" is an invalid quoted string.
|
|
||||||
|
|
||||||
// Avoid allocation in trivial cases.
|
|
||||||
simple := true
|
|
||||||
for _, r := range s {
|
|
||||||
if r == '\\' || r == quote {
|
|
||||||
simple = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if simple {
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, 0, 3*len(s)/2)
|
|
||||||
for len(s) > 0 {
|
|
||||||
r, n := utf8.DecodeRuneInString(s)
|
|
||||||
if r == utf8.RuneError && n == 1 {
|
|
||||||
return "", errBadUTF8
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
if r != '\\' {
|
|
||||||
if r < utf8.RuneSelf {
|
|
||||||
buf = append(buf, byte(r))
|
|
||||||
} else {
|
|
||||||
buf = append(buf, string(r)...)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
ch, tail, err := unescape(s)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
buf = append(buf, ch...)
|
|
||||||
s = tail
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unescape(s string) (ch string, tail string, err error) {
|
|
||||||
r, n := utf8.DecodeRuneInString(s)
|
|
||||||
if r == utf8.RuneError && n == 1 {
|
|
||||||
return "", "", errBadUTF8
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
switch r {
|
|
||||||
case 'a':
|
|
||||||
return "\a", s, nil
|
|
||||||
case 'b':
|
|
||||||
return "\b", s, nil
|
|
||||||
case 'f':
|
|
||||||
return "\f", s, nil
|
|
||||||
case 'n':
|
|
||||||
return "\n", s, nil
|
|
||||||
case 'r':
|
|
||||||
return "\r", s, nil
|
|
||||||
case 't':
|
|
||||||
return "\t", s, nil
|
|
||||||
case 'v':
|
|
||||||
return "\v", s, nil
|
|
||||||
case '?':
|
|
||||||
return "?", s, nil // trigraph workaround
|
|
||||||
case '\'', '"', '\\':
|
|
||||||
return string(r), s, nil
|
|
||||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
|
||||||
if len(s) < 2 {
|
|
||||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
|
||||||
}
|
|
||||||
ss := string(r) + s[:2]
|
|
||||||
s = s[2:]
|
|
||||||
i, err := strconv.ParseUint(ss, 8, 8)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
|
||||||
}
|
|
||||||
return string([]byte{byte(i)}), s, nil
|
|
||||||
case 'x', 'X', 'u', 'U':
|
|
||||||
var n int
|
|
||||||
switch r {
|
|
||||||
case 'x', 'X':
|
|
||||||
n = 2
|
|
||||||
case 'u':
|
|
||||||
n = 4
|
|
||||||
case 'U':
|
|
||||||
n = 8
|
|
||||||
}
|
|
||||||
if len(s) < n {
|
|
||||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
|
||||||
}
|
|
||||||
ss := s[:n]
|
|
||||||
s = s[n:]
|
|
||||||
i, err := strconv.ParseUint(ss, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
|
||||||
}
|
|
||||||
if r == 'x' || r == 'X' {
|
|
||||||
return string([]byte{byte(i)}), s, nil
|
|
||||||
}
|
|
||||||
if i > utf8.MaxRune {
|
|
||||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
|
||||||
}
|
|
||||||
return string(i), s, nil
|
|
||||||
}
|
|
||||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Back off the parser by one token. Can only be done between calls to next().
|
|
||||||
// It makes the next advance() a no-op.
|
|
||||||
func (p *textParser) back() { p.backed = true }
|
|
||||||
|
|
||||||
// Advances the parser and returns the new current token.
|
|
||||||
func (p *textParser) next() *token {
|
|
||||||
if p.backed || p.done {
|
|
||||||
p.backed = false
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
if p.done {
|
|
||||||
p.cur.value = ""
|
|
||||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
|
||||||
// Look for multiple quoted strings separated by whitespace,
|
|
||||||
// and concatenate them.
|
|
||||||
cat := p.cur
|
|
||||||
for {
|
|
||||||
p.skipWhitespace()
|
|
||||||
if p.done || !isQuote(p.s[0]) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
if p.cur.err != nil {
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
cat.value += " " + p.cur.value
|
|
||||||
cat.unquoted += p.cur.unquoted
|
|
||||||
}
|
|
||||||
p.done = false // parser may have seen EOF, but we want to return cat
|
|
||||||
p.cur = cat
|
|
||||||
}
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) consumeToken(s string) error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != s {
|
|
||||||
p.back()
|
|
||||||
return p.errorf("expected %q, found %q", s, tok.value)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return a RequiredNotSetError indicating which required field was not set.
|
|
||||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
|
||||||
st := sv.Type()
|
|
||||||
sprops := GetProperties(st)
|
|
||||||
for i := 0; i < st.NumField(); i++ {
|
|
||||||
if !isNil(sv.Field(i)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
props := sprops.Prop[i]
|
|
||||||
if props.Required {
|
|
||||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
|
||||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
|
||||||
i, ok := sprops.decoderOrigNames[name]
|
|
||||||
if ok {
|
|
||||||
return i, sprops.Prop[i], true
|
|
||||||
}
|
|
||||||
return -1, nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume a ':' from the input stream (if the next token is a colon),
|
|
||||||
// returning an error if a colon is needed but not present.
|
|
||||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != ":" {
|
|
||||||
// Colon is optional when the field is a group or message.
|
|
||||||
needColon := true
|
|
||||||
switch props.Wire {
|
|
||||||
case "group":
|
|
||||||
needColon = false
|
|
||||||
case "bytes":
|
|
||||||
// A "bytes" field is either a message, a string, or a repeated field;
|
|
||||||
// those three become *T, *string and []T respectively, so we can check for
|
|
||||||
// this field being a pointer to a non-string.
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
// *T or *string
|
|
||||||
if typ.Elem().Kind() == reflect.String {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else if typ.Kind() == reflect.Slice {
|
|
||||||
// []T or []*T
|
|
||||||
if typ.Elem().Kind() != reflect.Ptr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else if typ.Kind() == reflect.String {
|
|
||||||
// The proto3 exception is for a string field,
|
|
||||||
// which requires a colon.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
needColon = false
|
|
||||||
}
|
|
||||||
if needColon {
|
|
||||||
return p.errorf("expected ':', found %q", tok.value)
|
|
||||||
}
|
|
||||||
p.back()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|
||||||
st := sv.Type()
|
|
||||||
sprops := GetProperties(st)
|
|
||||||
reqCount := sprops.reqCount
|
|
||||||
var reqFieldErr error
|
|
||||||
fieldSet := make(map[string]bool)
|
|
||||||
// A struct is a sequence of "name: value", terminated by one of
|
|
||||||
// '>' or '}', or the end of the input. A name may also be
|
|
||||||
// "[extension]" or "[type/url]".
|
|
||||||
//
|
|
||||||
// The whole struct can also be an expanded Any message, like:
|
|
||||||
// [type/url] < ... struct contents ... >
|
|
||||||
for {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == terminator {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tok.value == "[" {
|
|
||||||
// Looks like an extension or an Any.
|
|
||||||
//
|
|
||||||
// TODO: Check whether we need to handle
|
|
||||||
// namespace rooted names (e.g. ".something.Foo").
|
|
||||||
extName, err := p.consumeExtName()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
|
||||||
// If it contains a slash, it's an Any type URL.
|
|
||||||
messageName := extName[s+1:]
|
|
||||||
mt := MessageType(messageName)
|
|
||||||
if mt == nil {
|
|
||||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
|
||||||
}
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
// consume an optional colon
|
|
||||||
if tok.value == ":" {
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
v := reflect.New(mt.Elem())
|
|
||||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
|
||||||
return pe
|
|
||||||
}
|
|
||||||
b, err := Marshal(v.Interface().(Message))
|
|
||||||
if err != nil {
|
|
||||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
|
||||||
}
|
|
||||||
if fieldSet["type_url"] {
|
|
||||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
|
||||||
}
|
|
||||||
if fieldSet["value"] {
|
|
||||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
|
||||||
}
|
|
||||||
sv.FieldByName("TypeUrl").SetString(extName)
|
|
||||||
sv.FieldByName("Value").SetBytes(b)
|
|
||||||
fieldSet["type_url"] = true
|
|
||||||
fieldSet["value"] = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var desc *ExtensionDesc
|
|
||||||
// This could be faster, but it's functional.
|
|
||||||
// TODO: Do something smarter than a linear scan.
|
|
||||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
|
||||||
if d.Name == extName {
|
|
||||||
desc = d
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if desc == nil {
|
|
||||||
return p.errorf("unrecognized extension %q", extName)
|
|
||||||
}
|
|
||||||
|
|
||||||
props := &Properties{}
|
|
||||||
props.Parse(desc.Tag)
|
|
||||||
|
|
||||||
typ := reflect.TypeOf(desc.ExtensionType)
|
|
||||||
if err := p.checkForColon(props, typ); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
rep := desc.repeated()
|
|
||||||
|
|
||||||
// Read the extension structure, and set it in
|
|
||||||
// the value we're constructing.
|
|
||||||
var ext reflect.Value
|
|
||||||
if !rep {
|
|
||||||
ext = reflect.New(typ).Elem()
|
|
||||||
} else {
|
|
||||||
ext = reflect.New(typ.Elem()).Elem()
|
|
||||||
}
|
|
||||||
if err := p.readAny(ext, props); err != nil {
|
|
||||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
reqFieldErr = err
|
|
||||||
}
|
|
||||||
ep := sv.Addr().Interface().(Message)
|
|
||||||
if !rep {
|
|
||||||
SetExtension(ep, desc, ext.Interface())
|
|
||||||
} else {
|
|
||||||
old, err := GetExtension(ep, desc)
|
|
||||||
var sl reflect.Value
|
|
||||||
if err == nil {
|
|
||||||
sl = reflect.ValueOf(old) // existing slice
|
|
||||||
} else {
|
|
||||||
sl = reflect.MakeSlice(typ, 0, 1)
|
|
||||||
}
|
|
||||||
sl = reflect.Append(sl, ext)
|
|
||||||
SetExtension(ep, desc, sl.Interface())
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a normal, non-extension field.
|
|
||||||
name := tok.value
|
|
||||||
var dst reflect.Value
|
|
||||||
fi, props, ok := structFieldByName(sprops, name)
|
|
||||||
if ok {
|
|
||||||
dst = sv.Field(fi)
|
|
||||||
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
|
||||||
// It is a oneof.
|
|
||||||
props = oop.Prop
|
|
||||||
nv := reflect.New(oop.Type.Elem())
|
|
||||||
dst = nv.Elem().Field(0)
|
|
||||||
field := sv.Field(oop.Field)
|
|
||||||
if !field.IsNil() {
|
|
||||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
|
||||||
}
|
|
||||||
field.Set(nv)
|
|
||||||
}
|
|
||||||
if !dst.IsValid() {
|
|
||||||
return p.errorf("unknown field name %q in %v", name, st)
|
|
||||||
}
|
|
||||||
|
|
||||||
if dst.Kind() == reflect.Map {
|
|
||||||
// Consume any colon.
|
|
||||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Construct the map if it doesn't already exist.
|
|
||||||
if dst.IsNil() {
|
|
||||||
dst.Set(reflect.MakeMap(dst.Type()))
|
|
||||||
}
|
|
||||||
key := reflect.New(dst.Type().Key()).Elem()
|
|
||||||
val := reflect.New(dst.Type().Elem()).Elem()
|
|
||||||
|
|
||||||
// The map entry should be this sequence of tokens:
|
|
||||||
// < key : KEY value : VALUE >
|
|
||||||
// However, implementations may omit key or value, and technically
|
|
||||||
// we should support them in any order. See b/28924776 for a time
|
|
||||||
// this went wrong.
|
|
||||||
|
|
||||||
tok := p.next()
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == terminator {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
switch tok.value {
|
|
||||||
case "key":
|
|
||||||
if err := p.consumeToken(":"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case "value":
|
|
||||||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.readAny(val, props.MapValProp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
p.back()
|
|
||||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dst.SetMapIndex(key, val)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check that it's not already set if it's not a repeated field.
|
|
||||||
if !props.Repeated && fieldSet[name] {
|
|
||||||
return p.errorf("non-repeated field %q was repeated", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse into the field.
|
|
||||||
fieldSet[name] = true
|
|
||||||
if err := p.readAny(dst, props); err != nil {
|
|
||||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
reqFieldErr = err
|
|
||||||
}
|
|
||||||
if props.Required {
|
|
||||||
reqCount--
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if reqCount > 0 {
|
|
||||||
return p.missingRequiredFieldError(sv)
|
|
||||||
}
|
|
||||||
return reqFieldErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
|
||||||
// following ']'. It returns the name or URL consumed.
|
|
||||||
func (p *textParser) consumeExtName() (string, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", tok.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If extension name or type url is quoted, it's a single token.
|
|
||||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
|
||||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return name, p.consumeToken("]")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume everything up to "]"
|
|
||||||
var parts []string
|
|
||||||
for tok.value != "]" {
|
|
||||||
parts = append(parts, tok.value)
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
|
||||||
}
|
|
||||||
if p.done && tok.value != "]" {
|
|
||||||
return "", p.errorf("unclosed type_url or extension name")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(parts, ""), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
|
||||||
// It is used in readStruct to provide backward compatibility.
|
|
||||||
func (p *textParser) consumeOptionalSeparator() error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != ";" && tok.value != "," {
|
|
||||||
p.back()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "" {
|
|
||||||
return p.errorf("unexpected EOF")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch fv := v; fv.Kind() {
|
|
||||||
case reflect.Slice:
|
|
||||||
at := v.Type()
|
|
||||||
if at.Elem().Kind() == reflect.Uint8 {
|
|
||||||
// Special case for []byte
|
|
||||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
|
||||||
// Deliberately written out here, as the error after
|
|
||||||
// this switch statement would write "invalid []byte: ...",
|
|
||||||
// which is not as user-friendly.
|
|
||||||
return p.errorf("invalid string: %v", tok.value)
|
|
||||||
}
|
|
||||||
bytes := []byte(tok.unquoted)
|
|
||||||
fv.Set(reflect.ValueOf(bytes))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Repeated field.
|
|
||||||
if tok.value == "[" {
|
|
||||||
// Repeated field with list notation, like [1,2,3].
|
|
||||||
for {
|
|
||||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
|
||||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "]" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tok.value != "," {
|
|
||||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// One value of the repeated field.
|
|
||||||
p.back()
|
|
||||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
|
||||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
|
||||||
case reflect.Bool:
|
|
||||||
// true/1/t/True or false/f/0/False.
|
|
||||||
switch tok.value {
|
|
||||||
case "true", "1", "t", "True":
|
|
||||||
fv.SetBool(true)
|
|
||||||
return nil
|
|
||||||
case "false", "0", "f", "False":
|
|
||||||
fv.SetBool(false)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
v := tok.value
|
|
||||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
|
||||||
// remove 'f' when the value is "-inf" or "inf".
|
|
||||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
|
||||||
v = v[:len(v)-1]
|
|
||||||
}
|
|
||||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
|
||||||
fv.SetFloat(f)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case reflect.Int32:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
|
||||||
fv.SetInt(x)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(props.Enum) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
m, ok := enumValueMaps[props.Enum]
|
|
||||||
if !ok {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
x, ok := m[tok.value]
|
|
||||||
if !ok {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fv.SetInt(int64(x))
|
|
||||||
return nil
|
|
||||||
case reflect.Int64:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
|
||||||
fv.SetInt(x)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Ptr:
|
|
||||||
// A basic field (indirected through pointer), or a repeated message/group
|
|
||||||
p.back()
|
|
||||||
fv.Set(reflect.New(fv.Type().Elem()))
|
|
||||||
return p.readAny(fv.Elem(), props)
|
|
||||||
case reflect.String:
|
|
||||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
|
||||||
fv.SetString(tok.unquoted)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
|
||||||
return p.readStruct(fv, terminator)
|
|
||||||
case reflect.Uint32:
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
|
||||||
fv.SetUint(uint64(x))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case reflect.Uint64:
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
|
||||||
fv.SetUint(x)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
|
||||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
|
||||||
// If a required field is not set and no other error occurs,
|
|
||||||
// UnmarshalText returns *RequiredNotSetError.
|
|
||||||
func UnmarshalText(s string, pb Message) error {
|
|
||||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
|
||||||
return um.UnmarshalText([]byte(s))
|
|
||||||
}
|
|
||||||
pb.Reset()
|
|
||||||
v := reflect.ValueOf(pb)
|
|
||||||
return newTextParser(s).readStruct(v.Elem(), "")
|
|
||||||
}
|
|
||||||
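The deletion above removes the hand-written text-format tokenizer and parser; UnmarshalText itself remains part of the public API, presumably reimplemented elsewhere in this update on top of google.golang.org/protobuf. A round-trip sketch follows; `pb.Person` is again a hypothetical generated message, and as before the text format accepts `#` comments and optional `,`/`;` separators.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // assumed generated package, not part of this change
)

func main() {
	in := `name: "Ada"  # trailing comments are allowed`

	m := &pb.Person{}
	if err := proto.UnmarshalText(in, m); err != nil {
		panic(err)
	}
	fmt.Println(proto.CompactTextString(m)) // roughly: name:"Ada"
}
```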
78
vendor/github.com/golang/protobuf/proto/wire.go
generated
vendored
Normal file
78
vendor/github.com/golang/protobuf/proto/wire.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/runtime/protoiface"
)

// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
	if m == nil {
		return 0
	}
	mi := MessageV2(m)
	return protoV2.Size(mi)
}

// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
	b, err := marshalAppend(nil, m, false)
	if b == nil {
		b = zeroBytes
	}
	return b, err
}

var zeroBytes = make([]byte, 0, 0)

func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
	if m == nil {
		return nil, ErrNil
	}
	mi := MessageV2(m)
	nbuf, err := protoV2.MarshalOptions{
		Deterministic: deterministic,
		AllowPartial:  true,
	}.MarshalAppend(buf, mi)
	if err != nil {
		return buf, err
	}
	if len(buf) == len(nbuf) {
		if !mi.ProtoReflect().IsValid() {
			return buf, ErrNil
		}
	}
	return nbuf, checkRequiredNotSet(mi)
}

// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
	m.Reset()
	return UnmarshalMerge(b, m)
}

// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
	mi := MessageV2(m)
	out, err := protoV2.UnmarshalOptions{
		AllowPartial: true,
		Merge:        true,
	}.UnmarshalState(protoiface.UnmarshalInput{
		Buf:     b,
		Message: mi.ProtoReflect(),
	})
	if err != nil {
		return err
	}
	if out.Flags&protoiface.UnmarshalInitialized > 0 {
		return nil
	}
	return checkRequiredNotSet(mi)
}
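wire.go is the thin shim that routes the classic Marshal/Unmarshal/Size API through google.golang.org/protobuf. A round-trip sketch; `pb.Person` is a hypothetical generated message, while the proto calls are the ones defined above.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // assumed generated package, not part of this change
)

func main() {
	in := &pb.Person{Name: proto.String("Ada")}

	b, err := proto.Marshal(in) // wire-format bytes
	if err != nil {
		panic(err)
	}
	fmt.Println(proto.Size(in) == len(b)) // true: Size reports the encoded length

	out := &pb.Person{}
	if err := proto.Unmarshal(b, out); err != nil { // Reset, then UnmarshalMerge
		panic(err)
	}
	fmt.Println(out.GetName()) // Ada
}
```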
34
vendor/github.com/golang/protobuf/proto/wrappers.go
generated
vendored
Normal file
34
vendor/github.com/golang/protobuf/proto/wrappers.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }

// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }

// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }

// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }

// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }

// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }

// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }

// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }

// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
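The pointer helpers above exist because proto2-style optional scalar fields are generated as pointer types, so literals cannot be assigned directly. A minimal sketch with a hypothetical generated message.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // assumed generated package, not part of this change
)

func main() {
	m := &pb.Person{
		Name: proto.String("Ada"), // *string
		Id:   proto.Int32(7),      // *int32
	}
	fmt.Println(m.GetName(), m.GetId())
}
```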
3047
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
generated
vendored
3047
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
generated
vendored
File diff suppressed because it is too large
885
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
generated
vendored
885
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
generated
vendored
@@ -1,885 +0,0 @@
|
|||||||
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Author: kenton@google.com (Kenton Varda)
|
|
||||||
// Based on original Protocol Buffers design by
|
|
||||||
// Sanjay Ghemawat, Jeff Dean, and others.
|
|
||||||
//
|
|
||||||
// The messages in this file describe the definitions found in .proto files.
|
|
||||||
// A valid .proto file can be translated directly to a FileDescriptorProto
|
|
||||||
// without any other information (e.g. without reading its imports).
|
|
||||||
|
|
||||||
|
|
||||||
syntax = "proto2";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
|
|
||||||
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "DescriptorProtos";
|
|
||||||
option csharp_namespace = "Google.Protobuf.Reflection";
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
option cc_enable_arenas = true;
|
|
||||||
|
|
||||||
// descriptor.proto must be optimized for speed because reflection-based
|
|
||||||
// algorithms don't work during bootstrapping.
|
|
||||||
option optimize_for = SPEED;
|
|
||||||
|
|
||||||
// The protocol compiler can output a FileDescriptorSet containing the .proto
|
|
||||||
// files it parses.
|
|
||||||
message FileDescriptorSet {
|
|
||||||
repeated FileDescriptorProto file = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a complete .proto file.
|
|
||||||
message FileDescriptorProto {
|
|
||||||
optional string name = 1; // file name, relative to root of source tree
|
|
||||||
optional string package = 2; // e.g. "foo", "foo.bar", etc.
|
|
||||||
|
|
||||||
// Names of files imported by this file.
|
|
||||||
repeated string dependency = 3;
|
|
||||||
// Indexes of the public imported files in the dependency list above.
|
|
||||||
repeated int32 public_dependency = 10;
|
|
||||||
// Indexes of the weak imported files in the dependency list.
|
|
||||||
// For Google-internal migration only. Do not use.
|
|
||||||
repeated int32 weak_dependency = 11;
|
|
||||||
|
|
||||||
// All top-level definitions in this file.
|
|
||||||
repeated DescriptorProto message_type = 4;
|
|
||||||
repeated EnumDescriptorProto enum_type = 5;
|
|
||||||
repeated ServiceDescriptorProto service = 6;
|
|
||||||
repeated FieldDescriptorProto extension = 7;
|
|
||||||
|
|
||||||
optional FileOptions options = 8;
|
|
||||||
|
|
||||||
// This field contains optional information about the original source code.
|
|
||||||
// You may safely remove this entire field without harming runtime
|
|
||||||
// functionality of the descriptors -- the information is needed only by
|
|
||||||
// development tools.
|
|
||||||
optional SourceCodeInfo source_code_info = 9;
|
|
||||||
|
|
||||||
// The syntax of the proto file.
|
|
||||||
// The supported values are "proto2" and "proto3".
|
|
||||||
optional string syntax = 12;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a message type.
|
|
||||||
message DescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
|
|
||||||
repeated FieldDescriptorProto field = 2;
|
|
||||||
repeated FieldDescriptorProto extension = 6;
|
|
||||||
|
|
||||||
repeated DescriptorProto nested_type = 3;
|
|
||||||
repeated EnumDescriptorProto enum_type = 4;
|
|
||||||
|
|
||||||
message ExtensionRange {
|
|
||||||
optional int32 start = 1; // Inclusive.
|
|
||||||
optional int32 end = 2; // Exclusive.
|
|
||||||
|
|
||||||
optional ExtensionRangeOptions options = 3;
|
|
||||||
}
|
|
||||||
repeated ExtensionRange extension_range = 5;
|
|
||||||
|
|
||||||
repeated OneofDescriptorProto oneof_decl = 8;
|
|
||||||
|
|
||||||
optional MessageOptions options = 7;
|
|
||||||
|
|
||||||
// Range of reserved tag numbers. Reserved tag numbers may not be used by
|
|
||||||
// fields or extension ranges in the same message. Reserved ranges may
|
|
||||||
// not overlap.
|
|
||||||
message ReservedRange {
|
|
||||||
optional int32 start = 1; // Inclusive.
|
|
||||||
optional int32 end = 2; // Exclusive.
|
|
||||||
}
|
|
||||||
repeated ReservedRange reserved_range = 9;
|
|
||||||
// Reserved field names, which may not be used by fields in the same message.
|
|
||||||
// A given name may only be reserved once.
|
|
||||||
repeated string reserved_name = 10;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExtensionRangeOptions {
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a field within a message.
|
|
||||||
message FieldDescriptorProto {
|
|
||||||
enum Type {
|
|
||||||
// 0 is reserved for errors.
|
|
||||||
// Order is weird for historical reasons.
|
|
||||||
TYPE_DOUBLE = 1;
|
|
||||||
TYPE_FLOAT = 2;
|
|
||||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
|
|
||||||
// negative values are likely.
|
|
||||||
TYPE_INT64 = 3;
|
|
||||||
TYPE_UINT64 = 4;
|
|
||||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
|
|
||||||
// negative values are likely.
|
|
||||||
TYPE_INT32 = 5;
|
|
||||||
TYPE_FIXED64 = 6;
|
|
||||||
TYPE_FIXED32 = 7;
|
|
||||||
TYPE_BOOL = 8;
|
|
||||||
TYPE_STRING = 9;
|
|
||||||
// Tag-delimited aggregate.
|
|
||||||
// Group type is deprecated and not supported in proto3. However, Proto3
|
|
||||||
// implementations should still be able to parse the group wire format and
|
|
||||||
// treat group fields as unknown fields.
|
|
||||||
TYPE_GROUP = 10;
|
|
||||||
TYPE_MESSAGE = 11; // Length-delimited aggregate.
|
|
||||||
|
|
||||||
// New in version 2.
|
|
||||||
TYPE_BYTES = 12;
|
|
||||||
TYPE_UINT32 = 13;
|
|
||||||
TYPE_ENUM = 14;
|
|
||||||
TYPE_SFIXED32 = 15;
|
|
||||||
TYPE_SFIXED64 = 16;
|
|
||||||
TYPE_SINT32 = 17; // Uses ZigZag encoding.
|
|
||||||
TYPE_SINT64 = 18; // Uses ZigZag encoding.
|
|
||||||
}
|
|
||||||
|
|
||||||
enum Label {
|
|
||||||
// 0 is reserved for errors
|
|
||||||
LABEL_OPTIONAL = 1;
|
|
||||||
LABEL_REQUIRED = 2;
|
|
||||||
LABEL_REPEATED = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
optional string name = 1;
|
|
||||||
optional int32 number = 3;
|
|
||||||
optional Label label = 4;
|
|
||||||
|
|
||||||
// If type_name is set, this need not be set. If both this and type_name
|
|
||||||
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
|
|
||||||
optional Type type = 5;
|
|
||||||
|
|
||||||
// For message and enum types, this is the name of the type. If the name
|
|
||||||
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
|
|
||||||
// rules are used to find the type (i.e. first the nested types within this
|
|
||||||
// message are searched, then within the parent, on up to the root
|
|
||||||
// namespace).
|
|
||||||
optional string type_name = 6;
|
|
||||||
|
|
||||||
// For extensions, this is the name of the type being extended. It is
|
|
||||||
// resolved in the same manner as type_name.
|
|
||||||
optional string extendee = 2;
|
|
||||||
|
|
||||||
// For numeric types, contains the original text representation of the value.
|
|
||||||
// For booleans, "true" or "false".
|
|
||||||
// For strings, contains the default text contents (not escaped in any way).
|
|
||||||
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
|
|
||||||
// TODO(kenton): Base-64 encode?
|
|
||||||
optional string default_value = 7;
|
|
||||||
|
|
||||||
// If set, gives the index of a oneof in the containing type's oneof_decl
|
|
||||||
// list. This field is a member of that oneof.
|
|
||||||
optional int32 oneof_index = 9;
|
|
||||||
|
|
||||||
// JSON name of this field. The value is set by protocol compiler. If the
|
|
||||||
// user has set a "json_name" option on this field, that option's value
|
|
||||||
// will be used. Otherwise, it's deduced from the field's name by converting
|
|
||||||
// it to camelCase.
|
|
||||||
optional string json_name = 10;
|
|
||||||
|
|
||||||
optional FieldOptions options = 8;
|
|
||||||
}
|
|
||||||
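To make the Type and Label enums above concrete, here is a small sketch (illustration only, same descriptorpb assumption as before) of the descriptor the compiler would produce for "optional string name = 1;".

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/descriptorpb"
)

func main() {
    // Descriptor for: optional string name = 1;
    f := &descriptorpb.FieldDescriptorProto{
        Name:     proto.String("name"),
        Number:   proto.Int32(1),
        Label:    descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
        Type:     descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
        JsonName: proto.String("name"), // camelCase form the compiler would fill in
    }
    fmt.Println(f.GetName(), f.GetType(), f.GetLabel())
}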
|
|
||||||
// Describes a oneof.
|
|
||||||
message OneofDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
optional OneofOptions options = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes an enum type.
|
|
||||||
message EnumDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
|
|
||||||
repeated EnumValueDescriptorProto value = 2;
|
|
||||||
|
|
||||||
optional EnumOptions options = 3;
|
|
||||||
|
|
||||||
// Range of reserved numeric values. Reserved values may not be used by
|
|
||||||
// entries in the same enum. Reserved ranges may not overlap.
|
|
||||||
//
|
|
||||||
// Note that this is distinct from DescriptorProto.ReservedRange in that it
|
|
||||||
// is inclusive such that it can appropriately represent the entire int32
|
|
||||||
// domain.
|
|
||||||
message EnumReservedRange {
|
|
||||||
optional int32 start = 1; // Inclusive.
|
|
||||||
optional int32 end = 2; // Inclusive.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Range of reserved numeric values. Reserved numeric values may not be used
|
|
||||||
// by enum values in the same enum declaration. Reserved ranges may not
|
|
||||||
// overlap.
|
|
||||||
repeated EnumReservedRange reserved_range = 4;
|
|
||||||
|
|
||||||
// Reserved enum value names, which may not be reused. A given name may only
|
|
||||||
// be reserved once.
|
|
||||||
repeated string reserved_name = 5;
|
|
||||||
}
|
|
||||||
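The comment above stresses that EnumReservedRange.end is inclusive, unlike DescriptorProto.ReservedRange.end, which is exclusive. A sketch of how the same source declaration "reserved 2 to 5;" would land in each descriptor (illustration only, assuming descriptorpb):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/descriptorpb"
)

func main() {
    // In a message, "reserved 2 to 5;" is stored with an exclusive end (5 + 1 = 6).
    msgRange := &descriptorpb.DescriptorProto_ReservedRange{
        Start: proto.Int32(2),
        End:   proto.Int32(6),
    }
    // In an enum, the same "reserved 2 to 5;" keeps the end inclusive.
    enumRange := &descriptorpb.EnumDescriptorProto_EnumReservedRange{
        Start: proto.Int32(2),
        End:   proto.Int32(5),
    }
    fmt.Println(msgRange.GetEnd(), enumRange.GetEnd())
}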
|
|
||||||
// Describes a value within an enum.
|
|
||||||
message EnumValueDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
optional int32 number = 2;
|
|
||||||
|
|
||||||
optional EnumValueOptions options = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a service.
|
|
||||||
message ServiceDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
repeated MethodDescriptorProto method = 2;
|
|
||||||
|
|
||||||
optional ServiceOptions options = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a method of a service.
|
|
||||||
message MethodDescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
|
|
||||||
// Input and output type names. These are resolved in the same way as
|
|
||||||
// FieldDescriptorProto.type_name, but must refer to a message type.
|
|
||||||
optional string input_type = 2;
|
|
||||||
optional string output_type = 3;
|
|
||||||
|
|
||||||
optional MethodOptions options = 4;
|
|
||||||
|
|
||||||
// Identifies if client streams multiple client messages
|
|
||||||
optional bool client_streaming = 5 [default = false];
|
|
||||||
// Identifies if server streams multiple server messages
|
|
||||||
optional bool server_streaming = 6 [default = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// ===================================================================
|
|
||||||
// Options
|
|
||||||
|
|
||||||
// Each of the definitions above may have "options" attached. These are
|
|
||||||
// just annotations which may cause code to be generated slightly differently
|
|
||||||
// or may contain hints for code that manipulates protocol messages.
|
|
||||||
//
|
|
||||||
// Clients may define custom options as extensions of the *Options messages.
|
|
||||||
// These extensions may not yet be known at parsing time, so the parser cannot
|
|
||||||
// store the values in them. Instead it stores them in a field in the *Options
|
|
||||||
// message called uninterpreted_option. This field must have the same name
|
|
||||||
// across all *Options messages. We then use this field to populate the
|
|
||||||
// extensions when we build a descriptor, at which point all protos have been
|
|
||||||
// parsed and so all extensions are known.
|
|
||||||
//
|
|
||||||
// Extension numbers for custom options may be chosen as follows:
|
|
||||||
// * For options which will only be used within a single application or
|
|
||||||
// organization, or for experimental options, use field numbers 50000
|
|
||||||
// through 99999. It is up to you to ensure that you do not use the
|
|
||||||
// same number for multiple options.
|
|
||||||
// * For options which will be published and used publicly by multiple
|
|
||||||
// independent entities, e-mail protobuf-global-extension-registry@google.com
|
|
||||||
// to reserve extension numbers. Simply provide your project name (e.g.
|
|
||||||
// Objective-C plugin) and your project website (if available) -- there's no
|
|
||||||
// need to explain how you intend to use them. Usually you only need one
|
|
||||||
// extension number. You can declare multiple options with only one extension
|
|
||||||
// number by putting them in a sub-message. See the Custom Options section of
|
|
||||||
// the docs for examples:
|
|
||||||
// https://developers.google.com/protocol-buffers/docs/proto#options
|
|
||||||
// If this turns out to be popular, a web service will be set up
|
|
||||||
// to automatically assign option numbers.
|
|
||||||
|
|
||||||
message FileOptions {
|
|
||||||
|
|
||||||
// Sets the Java package where classes generated from this .proto will be
|
|
||||||
// placed. By default, the proto package is used, but this is often
|
|
||||||
// inappropriate because proto packages do not normally start with backwards
|
|
||||||
// domain names.
|
|
||||||
optional string java_package = 1;
|
|
||||||
|
|
||||||
|
|
||||||
// If set, all the classes from the .proto file are wrapped in a single
|
|
||||||
// outer class with the given name. This applies to both Proto1
|
|
||||||
// (equivalent to the old "--one_java_file" option) and Proto2 (where
|
|
||||||
// a .proto always translates to a single class, but you may want to
|
|
||||||
// explicitly choose the class name).
|
|
||||||
optional string java_outer_classname = 8;
|
|
||||||
|
|
||||||
// If set true, then the Java code generator will generate a separate .java
|
|
||||||
// file for each top-level message, enum, and service defined in the .proto
|
|
||||||
// file. Thus, these types will *not* be nested inside the outer class
|
|
||||||
// named by java_outer_classname. However, the outer class will still be
|
|
||||||
// generated to contain the file's getDescriptor() method as well as any
|
|
||||||
// top-level extensions defined in the file.
|
|
||||||
optional bool java_multiple_files = 10 [default = false];
|
|
||||||
|
|
||||||
// This option does nothing.
|
|
||||||
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
|
|
||||||
|
|
||||||
// If set true, then the Java2 code generator will generate code that
|
|
||||||
// throws an exception whenever an attempt is made to assign a non-UTF-8
|
|
||||||
// byte sequence to a string field.
|
|
||||||
// Message reflection will do the same.
|
|
||||||
// However, an extension field still accepts non-UTF-8 byte sequences.
|
|
||||||
// This option has no effect when used with the lite runtime.
|
|
||||||
optional bool java_string_check_utf8 = 27 [default = false];
|
|
||||||
|
|
||||||
|
|
||||||
// Generated classes can be optimized for speed or code size.
|
|
||||||
enum OptimizeMode {
|
|
||||||
SPEED = 1; // Generate complete code for parsing, serialization,
|
|
||||||
// etc.
|
|
||||||
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
|
|
||||||
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
|
|
||||||
}
|
|
||||||
optional OptimizeMode optimize_for = 9 [default = SPEED];
|
|
||||||
|
|
||||||
// Sets the Go package where structs generated from this .proto will be
|
|
||||||
// placed. If omitted, the Go package will be derived from the following:
|
|
||||||
// - The basename of the package import path, if provided.
|
|
||||||
// - Otherwise, the package statement in the .proto file, if present.
|
|
||||||
// - Otherwise, the basename of the .proto file, without extension.
|
|
||||||
optional string go_package = 11;
|
|
||||||
// Should generic services be generated in each language? "Generic" services
|
|
||||||
// are not specific to any particular RPC system. They are generated by the
|
|
||||||
// main code generators in each language (without additional plugins).
|
|
||||||
// Generic services were the only kind of service generation supported by
|
|
||||||
// early versions of google.protobuf.
|
|
||||||
//
|
|
||||||
// Generic services are now considered deprecated in favor of using plugins
|
|
||||||
// that generate code specific to your particular RPC system. Therefore,
|
|
||||||
// these default to false. Old code which depends on generic services should
|
|
||||||
// explicitly set them to true.
|
|
||||||
optional bool cc_generic_services = 16 [default = false];
|
|
||||||
optional bool java_generic_services = 17 [default = false];
|
|
||||||
optional bool py_generic_services = 18 [default = false];
|
|
||||||
optional bool php_generic_services = 42 [default = false];
|
|
||||||
|
|
||||||
// Is this file deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for everything in the file, or it will be completely ignored; in the very
|
|
||||||
// least, this is a formalization for deprecating files.
|
|
||||||
optional bool deprecated = 23 [default = false];
|
|
||||||
|
|
||||||
// Enables the use of arenas for the proto messages in this file. This applies
|
|
||||||
// only to generated classes for C++.
|
|
||||||
optional bool cc_enable_arenas = 31 [default = false];
|
|
||||||
// Sets the objective c class prefix which is prepended to all objective c
|
|
||||||
// generated classes from this .proto. There is no default.
|
|
||||||
optional string objc_class_prefix = 36;
|
|
||||||
|
|
||||||
// Namespace for generated classes; defaults to the package.
|
|
||||||
optional string csharp_namespace = 37;
|
|
||||||
|
|
||||||
// By default Swift generators will take the proto package and CamelCase it
|
|
||||||
// replacing '.' with underscore and use that to prefix the types/symbols
|
|
||||||
// defined. When this option is provided, they will use this value instead
|
|
||||||
// to prefix the types/symbols defined.
|
|
||||||
optional string swift_prefix = 39;
|
|
||||||
|
|
||||||
// Sets the php class prefix which is prepended to all php generated classes
|
|
||||||
// from this .proto. Default is empty.
|
|
||||||
optional string php_class_prefix = 40;
|
|
||||||
|
|
||||||
// Use this option to change the namespace of php generated classes. Default
|
|
||||||
// is empty. When this option is empty, the package name will be used for
|
|
||||||
// determining the namespace.
|
|
||||||
optional string php_namespace = 41;
|
|
||||||
|
|
||||||
// Use this option to change the namespace of php generated metadata classes.
|
|
||||||
// Default is empty. When this option is empty, the proto file name will be
|
|
||||||
// used for determining the namespace.
|
|
||||||
optional string php_metadata_namespace = 44;
|
|
||||||
|
|
||||||
// Use this option to change the package of ruby generated classes. Default
|
|
||||||
// is empty. When this option is not set, the package name will be used for
|
|
||||||
// determining the ruby package.
|
|
||||||
optional string ruby_package = 45;
|
|
||||||
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here.
|
|
||||||
// See the documentation for the "Options" section above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message.
|
|
||||||
// See the documentation for the "Options" section above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
|
|
||||||
reserved 38;
|
|
||||||
}
|
|
||||||
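A sketch of attaching a few of the FileOptions documented above to a file descriptor (illustration only, assuming descriptorpb; all option values are made-up examples):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/descriptorpb"
)

func main() {
    fd := &descriptorpb.FileDescriptorProto{
        Name: proto.String("acme/api.proto"),
        Options: &descriptorpb.FileOptions{
            JavaPackage:       proto.String("com.acme.api"),
            JavaMultipleFiles: proto.Bool(true),
            GoPackage:         proto.String("github.com/acme/api;api"),
            OptimizeFor:       descriptorpb.FileOptions_LITE_RUNTIME.Enum(),
        },
    }
    fmt.Println(fd.GetOptions().GetJavaPackage(), fd.GetOptions().GetOptimizeFor())
}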
|
|
||||||
message MessageOptions {
|
|
||||||
// Set true to use the old proto1 MessageSet wire format for extensions.
|
|
||||||
// This is provided for backwards-compatibility with the MessageSet wire
|
|
||||||
// format. You should not use this for any other reason: It's less
|
|
||||||
// efficient, has fewer features, and is more complicated.
|
|
||||||
//
|
|
||||||
// The message must be defined exactly as follows:
|
|
||||||
// message Foo {
|
|
||||||
// option message_set_wire_format = true;
|
|
||||||
// extensions 4 to max;
|
|
||||||
// }
|
|
||||||
// Note that the message cannot have any defined fields; MessageSets only
|
|
||||||
// have extensions.
|
|
||||||
//
|
|
||||||
// All extensions of your type must be singular messages; e.g. they cannot
|
|
||||||
// be int32s, enums, or repeated messages.
|
|
||||||
//
|
|
||||||
// Because this is an option, the above two restrictions are not enforced by
|
|
||||||
// the protocol compiler.
|
|
||||||
optional bool message_set_wire_format = 1 [default = false];
|
|
||||||
|
|
||||||
// Disables the generation of the standard "descriptor()" accessor, which can
|
|
||||||
// conflict with a field of the same name. This is meant to make migration
|
|
||||||
// from proto1 easier; new code should avoid fields named "descriptor".
|
|
||||||
optional bool no_standard_descriptor_accessor = 2 [default = false];
|
|
||||||
|
|
||||||
// Is this message deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the message, or it will be completely ignored; in the very least,
|
|
||||||
// this is a formalization for deprecating messages.
|
|
||||||
optional bool deprecated = 3 [default = false];
|
|
||||||
|
|
||||||
// Whether the message is an automatically generated map entry type for the
|
|
||||||
// maps field.
|
|
||||||
//
|
|
||||||
// For maps fields:
|
|
||||||
// map<KeyType, ValueType> map_field = 1;
|
|
||||||
// The parsed descriptor looks like:
|
|
||||||
// message MapFieldEntry {
|
|
||||||
// option map_entry = true;
|
|
||||||
// optional KeyType key = 1;
|
|
||||||
// optional ValueType value = 2;
|
|
||||||
// }
|
|
||||||
// repeated MapFieldEntry map_field = 1;
|
|
||||||
//
|
|
||||||
// Implementations may choose not to generate the map_entry=true message, but
|
|
||||||
// use a native map in the target language to hold the keys and values.
|
|
||||||
// The reflection APIs in such implementations still need to work as
|
|
||||||
// if the field is a repeated message field.
|
|
||||||
//
|
|
||||||
// NOTE: Do not set the option in .proto files. Always use the maps syntax
|
|
||||||
// instead. The option should only be implicitly set by the proto compiler
|
|
||||||
// parser.
|
|
||||||
optional bool map_entry = 7;
|
|
||||||
|
|
||||||
reserved 8; // javalite_serializable
|
|
||||||
reserved 9; // javanano_as_lite
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
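Because map_entry is only ever set on the synthetic MapFieldEntry messages described above, tools that walk descriptors usually skip such nested types. A sketch (illustration only, assuming descriptorpb):

package main

import (
    "fmt"

    "google.golang.org/protobuf/types/descriptorpb"
)

// realNestedTypes returns the nested messages of m that were written by hand,
// skipping the auto-generated map entry types (option map_entry = true).
func realNestedTypes(m *descriptorpb.DescriptorProto) []*descriptorpb.DescriptorProto {
    var out []*descriptorpb.DescriptorProto
    for _, nt := range m.GetNestedType() {
        if nt.GetOptions().GetMapEntry() {
            continue // synthetic MapFieldEntry for a map<...> field
        }
        out = append(out, nt)
    }
    return out
}

func main() {
    fmt.Println(len(realNestedTypes(&descriptorpb.DescriptorProto{})))
}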
|
|
||||||
message FieldOptions {
|
|
||||||
// The ctype option instructs the C++ code generator to use a different
|
|
||||||
// representation of the field than it normally would. See the specific
|
|
||||||
// options below. This option is not yet implemented in the open source
|
|
||||||
// release -- sorry, we'll try to include it in a future version!
|
|
||||||
optional CType ctype = 1 [default = STRING];
|
|
||||||
enum CType {
|
|
||||||
// Default mode.
|
|
||||||
STRING = 0;
|
|
||||||
|
|
||||||
CORD = 1;
|
|
||||||
|
|
||||||
STRING_PIECE = 2;
|
|
||||||
}
|
|
||||||
// The packed option can be enabled for repeated primitive fields to enable
|
|
||||||
// a more efficient representation on the wire. Rather than repeatedly
|
|
||||||
// writing the tag and type for each element, the entire array is encoded as
|
|
||||||
// a single length-delimited blob. In proto3, only explicitly setting it to
|
|
||||||
// false will avoid using packed encoding.
|
|
||||||
optional bool packed = 2;
|
|
||||||
|
|
||||||
// The jstype option determines the JavaScript type used for values of the
|
|
||||||
// field. The option is permitted only for 64 bit integral and fixed types
|
|
||||||
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
|
|
||||||
// is represented as JavaScript string, which avoids loss of precision that
|
|
||||||
// can happen when a large value is converted to a floating point JavaScript number.
|
|
||||||
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
|
|
||||||
// use the JavaScript "number" type. The behavior of the default option
|
|
||||||
// JS_NORMAL is implementation dependent.
|
|
||||||
//
|
|
||||||
// This option is an enum to permit additional types to be added, e.g.
|
|
||||||
// goog.math.Integer.
|
|
||||||
optional JSType jstype = 6 [default = JS_NORMAL];
|
|
||||||
enum JSType {
|
|
||||||
// Use the default type.
|
|
||||||
JS_NORMAL = 0;
|
|
||||||
|
|
||||||
// Use JavaScript strings.
|
|
||||||
JS_STRING = 1;
|
|
||||||
|
|
||||||
// Use JavaScript numbers.
|
|
||||||
JS_NUMBER = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should this field be parsed lazily? Lazy applies only to message-type
|
|
||||||
// fields. It means that when the outer message is initially parsed, the
|
|
||||||
// inner message's contents will not be parsed but instead stored in encoded
|
|
||||||
// form. The inner message will actually be parsed when it is first accessed.
|
|
||||||
//
|
|
||||||
// This is only a hint. Implementations are free to choose whether to use
|
|
||||||
// eager or lazy parsing regardless of the value of this option. However,
|
|
||||||
// setting this option true suggests that the protocol author believes that
|
|
||||||
// using lazy parsing on this field is worth the additional bookkeeping
|
|
||||||
// overhead typically needed to implement it.
|
|
||||||
//
|
|
||||||
// This option does not affect the public interface of any generated code;
|
|
||||||
// all method signatures remain the same. Furthermore, thread-safety of the
|
|
||||||
// interface is not affected by this option; const methods remain safe to
|
|
||||||
// call from multiple threads concurrently, while non-const methods continue
|
|
||||||
// to require exclusive access.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Note that implementations may choose not to check required fields within
|
|
||||||
// a lazy sub-message. That is, calling IsInitialized() on the outer message
|
|
||||||
// may return true even if the inner message has missing required fields.
|
|
||||||
// This is necessary because otherwise the inner message would have to be
|
|
||||||
// parsed in order to perform the check, defeating the purpose of lazy
|
|
||||||
// parsing. An implementation which chooses not to check required fields
|
|
||||||
// must be consistent about it. That is, for any particular sub-message, the
|
|
||||||
// implementation must either *always* check its required fields, or *never*
|
|
||||||
// check its required fields, regardless of whether or not the message has
|
|
||||||
// been parsed.
|
|
||||||
optional bool lazy = 5 [default = false];
|
|
||||||
|
|
||||||
// Is this field deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for accessors, or it will be completely ignored; in the very least, this
|
|
||||||
// is a formalization for deprecating fields.
|
|
||||||
optional bool deprecated = 3 [default = false];
|
|
||||||
|
|
||||||
// For Google-internal migration only. Do not use.
|
|
||||||
optional bool weak = 10 [default = false];
|
|
||||||
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
|
|
||||||
reserved 4; // removed jtype
|
|
||||||
}
|
|
||||||
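A short sketch of the packed and deprecated options above applied to a repeated numeric field (illustration only, assuming descriptorpb):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/descriptorpb"
)

func main() {
    // Descriptor for: repeated int32 ids = 1 [packed = true, deprecated = true];
    f := &descriptorpb.FieldDescriptorProto{
        Name:   proto.String("ids"),
        Number: proto.Int32(1),
        Label:  descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum(),
        Type:   descriptorpb.FieldDescriptorProto_TYPE_INT32.Enum(),
        Options: &descriptorpb.FieldOptions{
            Packed:     proto.Bool(true),
            Deprecated: proto.Bool(true),
        },
    }
    fmt.Println(f.GetOptions().GetPacked())
}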
|
|
||||||
message OneofOptions {
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message EnumOptions {
|
|
||||||
|
|
||||||
// Set this option to true to allow mapping different tag names to the same
|
|
||||||
// value.
|
|
||||||
optional bool allow_alias = 2;
|
|
||||||
|
|
||||||
// Is this enum deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the enum, or it will be completely ignored; in the very least, this
|
|
||||||
// is a formalization for deprecating enums.
|
|
||||||
optional bool deprecated = 3 [default = false];
|
|
||||||
|
|
||||||
reserved 5; // javanano_as_lite
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message EnumValueOptions {
|
|
||||||
// Is this enum value deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the enum value, or it will be completely ignored; in the very least,
|
|
||||||
// this is a formalization for deprecating enum values.
|
|
||||||
optional bool deprecated = 1 [default = false];
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ServiceOptions {
|
|
||||||
|
|
||||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
|
||||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
|
||||||
// we were already using them long before we decided to release Protocol
|
|
||||||
// Buffers.
|
|
||||||
|
|
||||||
// Is this service deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the service, or it will be completely ignored; in the very least,
|
|
||||||
// this is a formalization for deprecating services.
|
|
||||||
optional bool deprecated = 33 [default = false];
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MethodOptions {
|
|
||||||
|
|
||||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
|
||||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
|
||||||
// we were already using them long before we decided to release Protocol
|
|
||||||
// Buffers.
|
|
||||||
|
|
||||||
// Is this method deprecated?
|
|
||||||
// Depending on the target platform, this can emit Deprecated annotations
|
|
||||||
// for the method, or it will be completely ignored; in the very least,
|
|
||||||
// this is a formalization for deprecating methods.
|
|
||||||
optional bool deprecated = 33 [default = false];
|
|
||||||
|
|
||||||
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
|
|
||||||
// or neither? HTTP-based RPC implementations may choose the GET verb for safe
|
|
||||||
// methods, and PUT verb for idempotent methods instead of the default POST.
|
|
||||||
enum IdempotencyLevel {
|
|
||||||
IDEMPOTENCY_UNKNOWN = 0;
|
|
||||||
NO_SIDE_EFFECTS = 1; // implies idempotent
|
|
||||||
IDEMPOTENT = 2; // idempotent, but may have side effects
|
|
||||||
}
|
|
||||||
optional IdempotencyLevel idempotency_level = 34
|
|
||||||
[default = IDEMPOTENCY_UNKNOWN];
|
|
||||||
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
repeated UninterpretedOption uninterpreted_option = 999;
|
|
||||||
|
|
||||||
// Clients can define custom options in extensions of this message. See above.
|
|
||||||
extensions 1000 to max;
|
|
||||||
}
|
|
||||||
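A sketch marking a method as side-effect-free so an HTTP transport could map it to GET, as discussed above (illustration only, assuming descriptorpb; the method and type names are made up):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/descriptorpb"
)

func main() {
    m := &descriptorpb.MethodDescriptorProto{
        Name:       proto.String("GetThing"),
        InputType:  proto.String(".acme.GetThingRequest"),
        OutputType: proto.String(".acme.Thing"),
        Options: &descriptorpb.MethodOptions{
            IdempotencyLevel: descriptorpb.MethodOptions_NO_SIDE_EFFECTS.Enum(),
        },
    }
    fmt.Println(m.GetOptions().GetIdempotencyLevel())
}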
|
|
||||||
|
|
||||||
// A message representing an option the parser does not recognize. This only
|
|
||||||
// appears in options protos created by the compiler::Parser class.
|
|
||||||
// DescriptorPool resolves these when building Descriptor objects. Therefore,
|
|
||||||
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
|
|
||||||
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
|
|
||||||
// in them.
|
|
||||||
message UninterpretedOption {
|
|
||||||
// The name of the uninterpreted option. Each string represents a segment in
|
|
||||||
// a dot-separated name. is_extension is true iff a segment represents an
|
|
||||||
// extension (denoted with parentheses in options specs in .proto files).
|
|
||||||
// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
|
|
||||||
// "foo.(bar.baz).qux".
|
|
||||||
message NamePart {
|
|
||||||
required string name_part = 1;
|
|
||||||
required bool is_extension = 2;
|
|
||||||
}
|
|
||||||
repeated NamePart name = 2;
|
|
||||||
|
|
||||||
// The value of the uninterpreted option, in whatever type the tokenizer
|
|
||||||
// identified it as during parsing. Exactly one of these should be set.
|
|
||||||
optional string identifier_value = 3;
|
|
||||||
optional uint64 positive_int_value = 4;
|
|
||||||
optional int64 negative_int_value = 5;
|
|
||||||
optional double double_value = 6;
|
|
||||||
optional bytes string_value = 7;
|
|
||||||
optional string aggregate_value = 8;
|
|
||||||
}
|
|
||||||
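The NamePart encoding is easiest to see by rebuilding the documented example name "foo.(bar.baz).qux" by hand (illustration only, assuming descriptorpb):

package main

import (
    "fmt"
    "strings"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/descriptorpb"
)

func main() {
    // { ["foo", false], ["bar.baz", true], ["qux", false] } <=> "foo.(bar.baz).qux"
    opt := &descriptorpb.UninterpretedOption{
        Name: []*descriptorpb.UninterpretedOption_NamePart{
            {NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
            {NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
            {NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
        },
        IdentifierValue: proto.String("true"),
    }
    var parts []string
    for _, p := range opt.GetName() {
        if p.GetIsExtension() {
            parts = append(parts, "("+p.GetNamePart()+")")
        } else {
            parts = append(parts, p.GetNamePart())
        }
    }
    fmt.Println(strings.Join(parts, ".")) // foo.(bar.baz).qux
}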
|
|
||||||
// ===================================================================
|
|
||||||
// Optional source code info
|
|
||||||
|
|
||||||
// Encapsulates information about the original source file from which a
|
|
||||||
// FileDescriptorProto was generated.
|
|
||||||
message SourceCodeInfo {
|
|
||||||
// A Location identifies a piece of source code in a .proto file which
|
|
||||||
// corresponds to a particular definition. This information is intended
|
|
||||||
// to be useful to IDEs, code indexers, documentation generators, and similar
|
|
||||||
// tools.
|
|
||||||
//
|
|
||||||
// For example, say we have a file like:
|
|
||||||
// message Foo {
|
|
||||||
// optional string foo = 1;
|
|
||||||
// }
|
|
||||||
// Let's look at just the field definition:
|
|
||||||
// optional string foo = 1;
|
|
||||||
// ^ ^^ ^^ ^ ^^^
|
|
||||||
// a bc de f ghi
|
|
||||||
// We have the following locations:
|
|
||||||
// span path represents
|
|
||||||
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
|
|
||||||
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
|
|
||||||
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
|
|
||||||
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
|
|
||||||
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
|
|
||||||
//
|
|
||||||
// Notes:
|
|
||||||
// - A location may refer to a repeated field itself (i.e. not to any
|
|
||||||
// particular index within it). This is used whenever a set of elements are
|
|
||||||
// logically enclosed in a single code segment. For example, an entire
|
|
||||||
// extend block (possibly containing multiple extension definitions) will
|
|
||||||
// have an outer location whose path refers to the "extensions" repeated
|
|
||||||
// field without an index.
|
|
||||||
// - Multiple locations may have the same path. This happens when a single
|
|
||||||
// logical declaration is spread out across multiple places. The most
|
|
||||||
// obvious example is the "extend" block again -- there may be multiple
|
|
||||||
// extend blocks in the same scope, each of which will have the same path.
|
|
||||||
// - A location's span is not always a subset of its parent's span. For
|
|
||||||
// example, the "extendee" of an extension declaration appears at the
|
|
||||||
// beginning of the "extend" block and is shared by all extensions within
|
|
||||||
// the block.
|
|
||||||
// - Just because a location's span is a subset of some other location's span
|
|
||||||
// does not mean that it is a descendant. For example, a "group" defines
|
|
||||||
// both a type and a field in a single declaration. Thus, the locations
|
|
||||||
// corresponding to the type and field and their components will overlap.
|
|
||||||
// - Code which tries to interpret locations should probably be designed to
|
|
||||||
// ignore those that it doesn't understand, as more types of locations could
|
|
||||||
// be recorded in the future.
|
|
||||||
repeated Location location = 1;
|
|
||||||
message Location {
|
|
||||||
// Identifies which part of the FileDescriptorProto was defined at this
|
|
||||||
// location.
|
|
||||||
//
|
|
||||||
// Each element is a field number or an index. They form a path from
|
|
||||||
// the root FileDescriptorProto to the place where the definition appears. For
|
|
||||||
// example, this path:
|
|
||||||
// [ 4, 3, 2, 7, 1 ]
|
|
||||||
// refers to:
|
|
||||||
// file.message_type(3) // 4, 3
|
|
||||||
// .field(7) // 2, 7
|
|
||||||
// .name() // 1
|
|
||||||
// This is because FileDescriptorProto.message_type has field number 4:
|
|
||||||
// repeated DescriptorProto message_type = 4;
|
|
||||||
// and DescriptorProto.field has field number 2:
|
|
||||||
// repeated FieldDescriptorProto field = 2;
|
|
||||||
// and FieldDescriptorProto.name has field number 1:
|
|
||||||
// optional string name = 1;
|
|
||||||
//
|
|
||||||
// Thus, the above path gives the location of a field name. If we removed
|
|
||||||
// the last element:
|
|
||||||
// [ 4, 3, 2, 7 ]
|
|
||||||
// this path refers to the whole field declaration (from the beginning
|
|
||||||
// of the label to the terminating semicolon).
|
|
||||||
repeated int32 path = 1 [packed = true];
|
|
||||||
|
|
||||||
// Always has exactly three or four elements: start line, start column,
|
|
||||||
// end line (optional, otherwise assumed same as start line), end column.
|
|
||||||
// These are packed into a single field for efficiency. Note that line
|
|
||||||
// and column numbers are zero-based -- typically you will want to add
|
|
||||||
// 1 to each before displaying to a user.
|
|
||||||
repeated int32 span = 2 [packed = true];
|
|
||||||
|
|
||||||
// If this SourceCodeInfo represents a complete declaration, these are any
|
|
||||||
// comments appearing before and after the declaration which appear to be
|
|
||||||
// attached to the declaration.
|
|
||||||
//
|
|
||||||
// A series of line comments appearing on consecutive lines, with no other
|
|
||||||
// tokens appearing on those lines, will be treated as a single comment.
|
|
||||||
//
|
|
||||||
// leading_detached_comments will keep paragraphs of comments that appear
|
|
||||||
// before (but not connected to) the current element. Each paragraph,
|
|
||||||
// separated by empty lines, will be one comment element in the repeated
|
|
||||||
// field.
|
|
||||||
//
|
|
||||||
// Only the comment content is provided; comment markers (e.g. //) are
|
|
||||||
// stripped out. For block comments, leading whitespace and an asterisk
|
|
||||||
// will be stripped from the beginning of each line other than the first.
|
|
||||||
// Newlines are included in the output.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
//
|
|
||||||
// optional int32 foo = 1; // Comment attached to foo.
|
|
||||||
// // Comment attached to bar.
|
|
||||||
// optional int32 bar = 2;
|
|
||||||
//
|
|
||||||
// optional string baz = 3;
|
|
||||||
// // Comment attached to baz.
|
|
||||||
// // Another line attached to baz.
|
|
||||||
//
|
|
||||||
// // Comment attached to qux.
|
|
||||||
// //
|
|
||||||
// // Another line attached to qux.
|
|
||||||
// optional double qux = 4;
|
|
||||||
//
|
|
||||||
// // Detached comment for corge. This is not leading or trailing comments
|
|
||||||
// // to qux or corge because there are blank lines separating it from
|
|
||||||
// // both.
|
|
||||||
//
|
|
||||||
// // Detached comment for corge paragraph 2.
|
|
||||||
//
|
|
||||||
// optional string corge = 5;
|
|
||||||
// /* Block comment attached
|
|
||||||
// * to corge. Leading asterisks
|
|
||||||
// * will be removed. */
|
|
||||||
// /* Block comment attached to
|
|
||||||
// * grault. */
|
|
||||||
// optional int32 grault = 6;
|
|
||||||
//
|
|
||||||
// // ignored detached comments.
|
|
||||||
optional string leading_comments = 3;
|
|
||||||
optional string trailing_comments = 4;
|
|
||||||
repeated string leading_detached_comments = 6;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
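To make the path encoding above concrete, this sketch resolves the documented example path [ 4, 3, 2, 7, 1 ] against a FileDescriptorProto using the generated getters (illustration only, assuming descriptorpb); a real tool would interpret arbitrary paths generically.

package main

import (
    "fmt"

    "google.golang.org/protobuf/types/descriptorpb"
)

// fieldNameAt follows the documented example path [ 4, 3, 2, 7, 1 ]:
// message_type(3).field(7).name(). It returns "" if the path does not exist.
func fieldNameAt(fd *descriptorpb.FileDescriptorProto) string {
    msgs := fd.GetMessageType() // field number 4
    if len(msgs) <= 3 {
        return ""
    }
    fields := msgs[3].GetField() // field number 2
    if len(fields) <= 7 {
        return ""
    }
    return fields[7].GetName() // field number 1
}

func main() {
    fmt.Println(fieldNameAt(&descriptorpb.FileDescriptorProto{}))
}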
|
|
||||||
// Describes the relationship between generated code and its original source
|
|
||||||
// file. A GeneratedCodeInfo message is associated with only one generated
|
|
||||||
// source file, but may contain references to different source .proto files.
|
|
||||||
message GeneratedCodeInfo {
|
|
||||||
// An Annotation connects some span of text in generated code to an element
|
|
||||||
// of its generating .proto file.
|
|
||||||
repeated Annotation annotation = 1;
|
|
||||||
message Annotation {
|
|
||||||
// Identifies the element in the original source .proto file. This field
|
|
||||||
// is formatted the same as SourceCodeInfo.Location.path.
|
|
||||||
repeated int32 path = 1 [packed = true];
|
|
||||||
|
|
||||||
// Identifies the filesystem path to the original source .proto.
|
|
||||||
optional string source_file = 2;
|
|
||||||
|
|
||||||
// Identifies the starting offset in bytes in the generated code
|
|
||||||
// that relates to the identified object.
|
|
||||||
optional int32 begin = 3;
|
|
||||||
|
|
||||||
// Identifies the ending offset in bytes in the generated code that
|
|
||||||
// relates to the identified offset. The end offset should be one past
|
|
||||||
// the last relevant byte (so the length of the text = end - begin).
|
|
||||||
optional int32 end = 4;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
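The begin and end fields above are byte offsets into the generated file, with end one past the last relevant byte. A sketch that extracts the annotated span (illustration only, assuming descriptorpb; the sample source text is made up):

package main

import (
    "fmt"

    "google.golang.org/protobuf/types/descriptorpb"
)

// annotatedText returns the span of generated source that an Annotation
// points at, or "" if the offsets are out of range.
func annotatedText(generated []byte, ann *descriptorpb.GeneratedCodeInfo_Annotation) string {
    begin, end := int(ann.GetBegin()), int(ann.GetEnd()) // end is exclusive
    if begin < 0 || end > len(generated) || begin > end {
        return ""
    }
    return string(generated[begin:end])
}

func main() {
    src := []byte("type Foo struct{}")
    ann := &descriptorpb.GeneratedCodeInfo_Annotation{}
    fmt.Printf("%q\n", annotatedText(src, ann))
}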
230
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
230
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
@@ -1,141 +1,165 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
// https://github.com/golang/protobuf
|
// Use of this source code is governed by a BSD-style
|
||||||
//
|
// license that can be found in the LICENSE file.
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package ptypes
|
package ptypes
|
||||||
|
|
||||||
// This file implements functions to marshal proto.Message to/from
|
|
||||||
// google.protobuf.Any message.
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"github.com/golang/protobuf/ptypes/any"
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
|
||||||
|
anypb "github.com/golang/protobuf/ptypes/any"
|
||||||
)
|
)
|
||||||
|
|
||||||
const googleApis = "type.googleapis.com/"
|
const urlPrefix = "type.googleapis.com/"
|
||||||
|
|
||||||
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
|
// AnyMessageName returns the message name contained in an anypb.Any message.
|
||||||
//
|
// Most type assertions should use the Is function instead.
|
||||||
// Note that regular type assertions should be done using the Is
|
func AnyMessageName(any *anypb.Any) (string, error) {
|
||||||
// function. AnyMessageName is provided for less common use cases like filtering a
|
name, err := anyMessageName(any)
|
||||||
// sequence of Any messages based on a set of allowed message type names.
|
return string(name), err
|
||||||
func AnyMessageName(any *any.Any) (string, error) {
|
}
|
||||||
|
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
|
||||||
if any == nil {
|
if any == nil {
|
||||||
return "", fmt.Errorf("message is nil")
|
return "", fmt.Errorf("message is nil")
|
||||||
}
|
}
|
||||||
slash := strings.LastIndex(any.TypeUrl, "/")
|
name := protoreflect.FullName(any.TypeUrl)
|
||||||
if slash < 0 {
|
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
|
||||||
|
name = name[i+len("/"):]
|
||||||
|
}
|
||||||
|
if !name.IsValid() {
|
||||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||||
}
|
}
|
||||||
return any.TypeUrl[slash+1:], nil
|
return name, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
|
// MarshalAny marshals the given message m into an anypb.Any message.
|
||||||
func MarshalAny(pb proto.Message) (*any.Any, error) {
|
func MarshalAny(m proto.Message) (*anypb.Any, error) {
|
||||||
value, err := proto.Marshal(pb)
|
switch dm := m.(type) {
|
||||||
|
case DynamicAny:
|
||||||
|
m = dm.Message
|
||||||
|
case *DynamicAny:
|
||||||
|
if dm == nil {
|
||||||
|
return nil, proto.ErrNil
|
||||||
|
}
|
||||||
|
m = dm.Message
|
||||||
|
}
|
||||||
|
b, err := proto.Marshal(m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
|
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Empty returns a new message of the type specified in an anypb.Any message.
|
||||||
|
// It returns protoregistry.NotFound if the corresponding message type could not
|
||||||
|
// be resolved in the global registry.
|
||||||
|
func Empty(any *anypb.Any) (proto.Message, error) {
|
||||||
|
name, err := anyMessageName(any)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.MessageV1(mt.New().Interface()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
|
||||||
|
// into the provided message m. It returns an error if the target message
|
||||||
|
// does not match the type in the Any message or if an unmarshal error occurs.
|
||||||
|
//
|
||||||
|
// The target message m may be a *DynamicAny message. If the underlying message
|
||||||
|
// type could not be resolved, then this returns protoregistry.NotFound.
|
||||||
|
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
|
||||||
|
if dm, ok := m.(*DynamicAny); ok {
|
||||||
|
if dm.Message == nil {
|
||||||
|
var err error
|
||||||
|
dm.Message, err = Empty(any)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m = dm.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
anyName, err := AnyMessageName(any)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
msgName := proto.MessageName(m)
|
||||||
|
if anyName != msgName {
|
||||||
|
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
|
||||||
|
}
|
||||||
|
return proto.Unmarshal(any.Value, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is reports whether the Any message contains a message of the specified type.
|
||||||
|
func Is(any *anypb.Any, m proto.Message) bool {
|
||||||
|
if any == nil || m == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
name := proto.MessageName(m)
|
||||||
|
if !strings.HasSuffix(any.TypeUrl, name) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
|
||||||
}
|
}
|
||||||
|
|
||||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||||
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
// allocate a proto.Message for the type specified in an anypb.Any message.
|
||||||
// message. The allocated message is stored in the embedded proto.Message.
|
// The allocated message is stored in the embedded proto.Message.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
//
|
|
||||||
// var x ptypes.DynamicAny
|
// var x ptypes.DynamicAny
|
||||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||||
type DynamicAny struct {
|
type DynamicAny struct{ proto.Message }
|
||||||
proto.Message
|
|
||||||
|
func (m DynamicAny) String() string {
|
||||||
|
if m.Message == nil {
|
||||||
|
return "<nil>"
|
||||||
|
}
|
||||||
|
return m.Message.String()
|
||||||
|
}
|
||||||
|
func (m DynamicAny) Reset() {
|
||||||
|
if m.Message == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.Message.Reset()
|
||||||
|
}
|
||||||
|
func (m DynamicAny) ProtoMessage() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
func (m DynamicAny) ProtoReflect() protoreflect.Message {
|
||||||
|
if m.Message == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return dynamicAny{proto.MessageReflect(m.Message)}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Empty returns a new proto.Message of the type specified in a
|
type dynamicAny struct{ protoreflect.Message }
|
||||||
// google.protobuf.Any message. It returns an error if corresponding message
|
|
||||||
// type isn't linked in.
|
|
||||||
func Empty(any *any.Any) (proto.Message, error) {
|
|
||||||
aname, err := AnyMessageName(any)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
t := proto.MessageType(aname)
|
func (m dynamicAny) Type() protoreflect.MessageType {
|
||||||
if t == nil {
|
return dynamicAnyType{m.Message.Type()}
|
||||||
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
|
}
|
||||||
}
|
func (m dynamicAny) New() protoreflect.Message {
|
||||||
return reflect.New(t.Elem()).Interface().(proto.Message), nil
|
return dynamicAnyType{m.Message.Type()}.New()
|
||||||
|
}
|
||||||
|
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
|
||||||
|
return DynamicAny{proto.MessageV1(m.Message.Interface())}
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
|
type dynamicAnyType struct{ protoreflect.MessageType }
|
||||||
// message and places the decoded result in pb. It returns an error if type of
|
|
||||||
// contents of Any message does not match type of pb message.
|
|
||||||
//
|
|
||||||
// pb can be a proto.Message, or a *DynamicAny.
|
|
||||||
func UnmarshalAny(any *any.Any, pb proto.Message) error {
|
|
||||||
if d, ok := pb.(*DynamicAny); ok {
|
|
||||||
if d.Message == nil {
|
|
||||||
var err error
|
|
||||||
d.Message, err = Empty(any)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return UnmarshalAny(any, d.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
aname, err := AnyMessageName(any)
|
func (t dynamicAnyType) New() protoreflect.Message {
|
||||||
if err != nil {
|
return dynamicAny{t.MessageType.New()}
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
mname := proto.MessageName(pb)
|
|
||||||
if aname != mname {
|
|
||||||
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
|
|
||||||
}
|
|
||||||
return proto.Unmarshal(any.Value, pb)
|
|
||||||
}
|
}
|
||||||
|
func (t dynamicAnyType) Zero() protoreflect.Message {
|
||||||
// Is returns true if any value contains a given message type.
|
return dynamicAny{t.MessageType.Zero()}
|
||||||
func Is(any *any.Any, pb proto.Message) bool {
|
|
||||||
// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
|
|
||||||
// but it avoids scanning TypeUrl for the slash.
|
|
||||||
if any == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
name := proto.MessageName(pb)
|
|
||||||
prefix := len(any.TypeUrl) - len(name)
|
|
||||||
return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
|
|
||||||
}
|
}
|
||||||
|
|||||||
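The rewritten any.go above keeps the exported ptypes API while delegating to the new protoreflect runtime. A usage sketch of that API (illustration only; it borrows the StringValue well-known type from github.com/golang/protobuf/ptypes/wrappers purely as an example payload):

package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/ptypes"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    // Pack a message into an Any; the TypeUrl becomes
    // "type.googleapis.com/google.protobuf.StringValue".
    a, err := ptypes.MarshalAny(&wrappers.StringValue{Value: "hello"})
    if err != nil {
        log.Fatal(err)
    }

    // Cheap type check without unmarshaling, then unpack.
    if ptypes.Is(a, &wrappers.StringValue{}) {
        var out wrappers.StringValue
        if err := ptypes.UnmarshalAny(a, &out); err != nil {
            log.Fatal(err)
        }
        fmt.Println(a.TypeUrl, out.Value)
    }
}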
235
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
235
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
@@ -1,203 +1,62 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/any.proto
|
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
||||||
|
|
||||||
package any
|
package any
|
||||||
|
|
||||||
import (
|
import (
|
||||||
fmt "fmt"
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
proto "github.com/golang/protobuf/proto"
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
math "math"
|
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||||
|
reflect "reflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Symbols defined in public import of google/protobuf/any.proto.
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
type Any = anypb.Any
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
|
||||||
// URL that describes the type of the serialized message.
|
|
||||||
//
|
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
|
||||||
// Protobuf library provides support to pack/unpack Any values in the form
|
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||||
// of utility functions or additional generated methods of the Any type.
|
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||||
//
|
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||||
// Example 1: Pack and unpack a message in C++.
|
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||||
//
|
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
|
||||||
// Foo foo = ...;
|
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
|
||||||
// Any any;
|
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
|
||||||
// any.PackFrom(foo);
|
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||||
// ...
|
0x74, 0x6f, 0x33,
|
||||||
// if (any.UnpackTo(&foo)) {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 2: Pack and unpack a message in Java.
|
|
||||||
//
|
|
||||||
// Foo foo = ...;
|
|
||||||
// Any any = Any.pack(foo);
|
|
||||||
// ...
|
|
||||||
// if (any.is(Foo.class)) {
|
|
||||||
// foo = any.unpack(Foo.class);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 3: Pack and unpack a message in Python.
|
|
||||||
//
|
|
||||||
// foo = Foo(...)
|
|
||||||
// any = Any()
|
|
||||||
// any.Pack(foo)
|
|
||||||
// ...
|
|
||||||
// if any.Is(Foo.DESCRIPTOR):
|
|
||||||
// any.Unpack(foo)
|
|
||||||
// ...
|
|
||||||
//
|
|
||||||
// Example 4: Pack and unpack a message in Go
|
|
||||||
//
|
|
||||||
// foo := &pb.Foo{...}
|
|
||||||
// any, err := ptypes.MarshalAny(foo)
|
|
||||||
// ...
|
|
||||||
// foo := &pb.Foo{}
|
|
||||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// The pack methods provided by protobuf library will by default use
|
|
||||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
|
||||||
// methods only use the fully qualified type name after the last '/'
|
|
||||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
|
||||||
// name "y.z".
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// JSON
|
|
||||||
// ====
|
|
||||||
// The JSON representation of an `Any` value uses the regular
|
|
||||||
// representation of the deserialized, embedded message, with an
|
|
||||||
// additional field `@type` which contains the type URL. Example:
|
|
||||||
//
|
|
||||||
// package google.profile;
|
|
||||||
// message Person {
|
|
||||||
// string first_name = 1;
|
|
||||||
// string last_name = 2;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// "@type": "type.googleapis.com/google.profile.Person",
|
|
||||||
// "firstName": <string>,
|
|
||||||
// "lastName": <string>
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// If the embedded message type is well-known and has a custom JSON
|
|
||||||
// representation, that representation will be embedded adding a field
|
|
||||||
// `value` which holds the custom JSON in addition to the `@type`
|
|
||||||
// field. Example (for message [google.protobuf.Duration][]):
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
|
||||||
// "value": "1.212s"
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
type Any struct {
|
|
||||||
// A URL/resource name that uniquely identifies the type of the serialized
|
|
||||||
// protocol buffer message. This string must contain at least
|
|
||||||
// one "/" character. The last segment of the URL's path must represent
|
|
||||||
// the fully qualified name of the type (as in
|
|
||||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
|
||||||
// (e.g., leading "." is not accepted).
|
|
||||||
//
|
|
||||||
// In practice, teams usually precompile into the binary all types that they
|
|
||||||
// expect it to use in the context of Any. However, for URLs which use the
|
|
||||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
|
||||||
// server that maps type URLs to message definitions as follows:
|
|
||||||
//
|
|
||||||
// * If no scheme is provided, `https` is assumed.
|
|
||||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
|
||||||
// value in binary format, or produce an error.
|
|
||||||
// * Applications are allowed to cache lookup results based on the
|
|
||||||
// URL, or have them precompiled into a binary to avoid any
|
|
||||||
// lookup. Therefore, binary compatibility needs to be preserved
|
|
||||||
// on changes to types. (Use versioned type names to manage
|
|
||||||
// breaking changes.)
|
|
||||||
//
|
|
||||||
// Note: this functionality is not currently available in the official
|
|
||||||
// protobuf release, and it is not used for type URLs beginning with
|
|
||||||
// type.googleapis.com.
|
|
||||||
//
|
|
||||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
|
||||||
// used with implementation specific semantics.
|
|
||||||
//
|
|
||||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
|
||||||
// Must be a valid serialized protocol buffer of the above specified type.
|
|
||||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Any) Reset() { *m = Any{} }
|
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
|
||||||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
|
||||||
func (*Any) ProtoMessage() {}
|
0, // [0:0] is the sub-list for method output_type
|
||||||
func (*Any) Descriptor() ([]byte, []int) {
|
0, // [0:0] is the sub-list for method input_type
|
||||||
return fileDescriptor_b53526c13ae22eb4, []int{0}
|
0, // [0:0] is the sub-list for extension type_name
|
||||||
|
0, // [0:0] is the sub-list for extension extendee
|
||||||
|
0, // [0:0] is the sub-list for field type_name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
|
||||||
|
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
|
||||||
func (m *Any) XXX_Unmarshal(b []byte) error {
|
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
|
||||||
return xxx_messageInfo_Any.Unmarshal(m, b)
|
return
|
||||||
}
|
|
||||||
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Any) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Any.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Any) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Any.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Any) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Any.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Any proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Any) GetTypeUrl() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.TypeUrl
|
|
||||||
}
|
}
|
||||||
return ""
|
type x struct{}
|
||||||
}
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
func (m *Any) GetValue() []byte {
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
if m != nil {
|
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
|
||||||
return m.Value
|
NumEnums: 0,
|
||||||
}
|
NumMessages: 0,
|
||||||
return nil
|
NumExtensions: 0,
|
||||||
}
|
NumServices: 0,
|
||||||
|
},
|
||||||
func init() {
|
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
|
||||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
|
||||||
}
|
}.Build()
|
||||||
|
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
|
||||||
func init() {
|
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
|
||||||
proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4)
|
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
|
||||||
}
|
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
|
||||||
|
|
||||||
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
|
||||||
// 185 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
|
||||||
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
|
|
||||||
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
|
|
||||||
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
|
|
||||||
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
|
|
||||||
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
|
|
||||||
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
|
|
||||||
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
|
|
||||||
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
|
|
||||||
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
|
|
||||||
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
|
|
||||||
}
|
}
|
||||||
|
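For reference, the pack/unpack flow described in the Any comments above can be exercised through the ptypes helpers. A minimal sketch, assuming a Duration message as the payload (the pb.Foo message in the comment is only illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Packing sets the type URL to "type.googleapis.com/" plus the
	// fully qualified message name.
	msg := &durpb.Duration{Seconds: 1, Nanos: 212000000}
	a, err := ptypes.MarshalAny(msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpacking checks only the final '/'-separated segment of the
	// type URL against the target message name.
	out := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(a, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Seconds, out.Nanos) // 1 212000000
}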
|||||||
155
vendor/github.com/golang/protobuf/ptypes/any/any.proto
generated
vendored
@@ -1,155 +0,0 @@
|
|||||||
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
|
|
||||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
|
||||||
option go_package = "github.com/golang/protobuf/ptypes/any";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "AnyProto";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
|
|
||||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
|
||||||
// URL that describes the type of the serialized message.
|
|
||||||
//
|
|
||||||
// Protobuf library provides support to pack/unpack Any values in the form
|
|
||||||
// of utility functions or additional generated methods of the Any type.
|
|
||||||
//
|
|
||||||
// Example 1: Pack and unpack a message in C++.
|
|
||||||
//
|
|
||||||
// Foo foo = ...;
|
|
||||||
// Any any;
|
|
||||||
// any.PackFrom(foo);
|
|
||||||
// ...
|
|
||||||
// if (any.UnpackTo(&foo)) {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 2: Pack and unpack a message in Java.
|
|
||||||
//
|
|
||||||
// Foo foo = ...;
|
|
||||||
// Any any = Any.pack(foo);
|
|
||||||
// ...
|
|
||||||
// if (any.is(Foo.class)) {
|
|
||||||
// foo = any.unpack(Foo.class);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 3: Pack and unpack a message in Python.
|
|
||||||
//
|
|
||||||
// foo = Foo(...)
|
|
||||||
// any = Any()
|
|
||||||
// any.Pack(foo)
|
|
||||||
// ...
|
|
||||||
// if any.Is(Foo.DESCRIPTOR):
|
|
||||||
// any.Unpack(foo)
|
|
||||||
// ...
|
|
||||||
//
|
|
||||||
// Example 4: Pack and unpack a message in Go
|
|
||||||
//
|
|
||||||
// foo := &pb.Foo{...}
|
|
||||||
// any, err := ptypes.MarshalAny(foo)
|
|
||||||
// ...
|
|
||||||
// foo := &pb.Foo{}
|
|
||||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// The pack methods provided by protobuf library will by default use
|
|
||||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
|
||||||
// methods only use the fully qualified type name after the last '/'
|
|
||||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
|
||||||
// name "y.z".
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// JSON
|
|
||||||
// ====
|
|
||||||
// The JSON representation of an `Any` value uses the regular
|
|
||||||
// representation of the deserialized, embedded message, with an
|
|
||||||
// additional field `@type` which contains the type URL. Example:
|
|
||||||
//
|
|
||||||
// package google.profile;
|
|
||||||
// message Person {
|
|
||||||
// string first_name = 1;
|
|
||||||
// string last_name = 2;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// "@type": "type.googleapis.com/google.profile.Person",
|
|
||||||
// "firstName": <string>,
|
|
||||||
// "lastName": <string>
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// If the embedded message type is well-known and has a custom JSON
|
|
||||||
// representation, that representation will be embedded adding a field
|
|
||||||
// `value` which holds the custom JSON in addition to the `@type`
|
|
||||||
// field. Example (for message [google.protobuf.Duration][]):
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
|
||||||
// "value": "1.212s"
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
message Any {
|
|
||||||
// A URL/resource name that uniquely identifies the type of the serialized
|
|
||||||
// protocol buffer message. This string must contain at least
|
|
||||||
// one "/" character. The last segment of the URL's path must represent
|
|
||||||
// the fully qualified name of the type (as in
|
|
||||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
|
||||||
// (e.g., leading "." is not accepted).
|
|
||||||
//
|
|
||||||
// In practice, teams usually precompile into the binary all types that they
|
|
||||||
// expect to use in the context of Any. However, for URLs which use the
|
|
||||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
|
||||||
// server that maps type URLs to message definitions as follows:
|
|
||||||
//
|
|
||||||
// * If no scheme is provided, `https` is assumed.
|
|
||||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
|
||||||
// value in binary format, or produce an error.
|
|
||||||
// * Applications are allowed to cache lookup results based on the
|
|
||||||
// URL, or have them precompiled into a binary to avoid any
|
|
||||||
// lookup. Therefore, binary compatibility needs to be preserved
|
|
||||||
// on changes to types. (Use versioned type names to manage
|
|
||||||
// breaking changes.)
|
|
||||||
//
|
|
||||||
// Note: this functionality is not currently available in the official
|
|
||||||
// protobuf release, and it is not used for type URLs beginning with
|
|
||||||
// type.googleapis.com.
|
|
||||||
//
|
|
||||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
|
||||||
// used with implementation specific semantics.
|
|
||||||
//
|
|
||||||
string type_url = 1;
|
|
||||||
|
|
||||||
// Must be a valid serialized protocol buffer of the above specified type.
|
|
||||||
bytes value = 2;
|
|
||||||
}
|
|
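A small sketch of the type-URL rule spelled out above: only the portion after the last '/' is taken as the fully qualified type name. The typeName helper is ours, purely for illustration, and is not part of the generated package:

package main

import (
	"fmt"
	"strings"
)

// typeName returns the fully qualified message name encoded in a type URL,
// i.e. everything after the final '/'.
func typeName(typeURL string) string {
	return typeURL[strings.LastIndexByte(typeURL, '/')+1:]
}

func main() {
	fmt.Println(typeName("type.googleapis.com/google.profile.Person")) // google.profile.Person
	fmt.Println(typeName("foo.bar.com/x/y.z"))                         // y.z
}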
||||||
35
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
@@ -1,35 +1,6 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
// https://github.com/golang/protobuf
|
// Use of this source code is governed by a BSD-style
|
||||||
//
|
// license that can be found in the LICENSE file.
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
/*
|
// Package ptypes provides functionality for interacting with well-known types.
|
||||||
Package ptypes contains code for interacting with well-known types.
|
|
||||||
*/
|
|
||||||
package ptypes
|
package ptypes
|
||||||
|
|||||||
114
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
@@ -1,102 +1,72 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
// https://github.com/golang/protobuf
|
// Use of this source code is governed by a BSD-style
|
||||||
//
|
// license that can be found in the LICENSE file.
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package ptypes
|
package ptypes
|
||||||
|
|
||||||
// This file implements conversions between google.protobuf.Duration
|
|
||||||
// and time.Duration.
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
durpb "github.com/golang/protobuf/ptypes/duration"
|
durationpb "github.com/golang/protobuf/ptypes/duration"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Range of google.protobuf.Duration as specified in duration.proto.
|
||||||
|
// This is about 10,000 years in seconds.
|
||||||
const (
|
const (
|
||||||
// Range of a durpb.Duration in seconds, as specified in
|
|
||||||
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
|
||||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||||
minSeconds = -maxSeconds
|
minSeconds = -maxSeconds
|
||||||
)
|
)
|
||||||
|
|
||||||
// validateDuration determines whether the durpb.Duration is valid according to the
|
// Duration converts a durationpb.Duration to a time.Duration.
|
||||||
// definition in google/protobuf/duration.proto. A valid durpb.Duration
|
// Duration returns an error if dur is invalid or overflows a time.Duration.
|
||||||
// may still be too large to fit into a time.Duration (the range of durpb.Duration
|
func Duration(dur *durationpb.Duration) (time.Duration, error) {
|
||||||
// is about 10,000 years, and the range of time.Duration is about 290).
|
if err := validateDuration(dur); err != nil {
|
||||||
func validateDuration(d *durpb.Duration) error {
|
|
||||||
if d == nil {
|
|
||||||
return errors.New("duration: nil Duration")
|
|
||||||
}
|
|
||||||
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
|
||||||
return fmt.Errorf("duration: %v: seconds out of range", d)
|
|
||||||
}
|
|
||||||
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
|
||||||
return fmt.Errorf("duration: %v: nanos out of range", d)
|
|
||||||
}
|
|
||||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
|
||||||
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
|
||||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Duration converts a durpb.Duration to a time.Duration. Duration
|
|
||||||
// returns an error if the durpb.Duration is invalid or is too large to be
|
|
||||||
// represented in a time.Duration.
|
|
||||||
func Duration(p *durpb.Duration) (time.Duration, error) {
|
|
||||||
if err := validateDuration(p); err != nil {
|
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
d := time.Duration(p.Seconds) * time.Second
|
d := time.Duration(dur.Seconds) * time.Second
|
||||||
if int64(d/time.Second) != p.Seconds {
|
if int64(d/time.Second) != dur.Seconds {
|
||||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||||
}
|
}
|
||||||
if p.Nanos != 0 {
|
if dur.Nanos != 0 {
|
||||||
d += time.Duration(p.Nanos) * time.Nanosecond
|
d += time.Duration(dur.Nanos) * time.Nanosecond
|
||||||
if (d < 0) != (p.Nanos < 0) {
|
if (d < 0) != (dur.Nanos < 0) {
|
||||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return d, nil
|
return d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DurationProto converts a time.Duration to a durpb.Duration.
|
// DurationProto converts a time.Duration to a durationpb.Duration.
|
||||||
func DurationProto(d time.Duration) *durpb.Duration {
|
func DurationProto(d time.Duration) *durationpb.Duration {
|
||||||
nanos := d.Nanoseconds()
|
nanos := d.Nanoseconds()
|
||||||
secs := nanos / 1e9
|
secs := nanos / 1e9
|
||||||
nanos -= secs * 1e9
|
nanos -= secs * 1e9
|
||||||
return &durpb.Duration{
|
return &durationpb.Duration{
|
||||||
Seconds: secs,
|
Seconds: int64(secs),
|
||||||
Nanos: int32(nanos),
|
Nanos: int32(nanos),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// validateDuration determines whether the durationpb.Duration is valid
|
||||||
|
// according to the definition in google/protobuf/duration.proto.
|
||||||
|
// A valid durationpb.Duration may still be too large to fit into a time.Duration.
|
||||||
|
// Note that the range of durationpb.Duration is about 10,000 years,
|
||||||
|
// while the range of time.Duration is about 290 years.
|
||||||
|
func validateDuration(dur *durationpb.Duration) error {
|
||||||
|
if dur == nil {
|
||||||
|
return errors.New("duration: nil Duration")
|
||||||
|
}
|
||||||
|
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
|
||||||
|
return fmt.Errorf("duration: %v: seconds out of range", dur)
|
||||||
|
}
|
||||||
|
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
|
||||||
|
return fmt.Errorf("duration: %v: nanos out of range", dur)
|
||||||
|
}
|
||||||
|
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||||
|
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
|
||||||
|
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
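The Duration/DurationProto pair above is the whole public surface of this file; a minimal usage sketch, assuming the type-aliased duration package is imported as durpb:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// proto -> Go: fails if seconds/nanos are out of range or disagree in sign.
	d, err := ptypes.Duration(&durpb.Duration{Seconds: 1, Nanos: 212000000})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1.212s

	// Go -> proto: always valid, since time.Duration's range (~290 years)
	// is far narrower than Duration's (~10,000 years).
	dp := ptypes.DurationProto(90 * time.Second)
	fmt.Println(dp.Seconds, dp.Nanos) // 90 0
}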
|||||||
196
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
@@ -1,163 +1,63 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/duration.proto
|
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||||
|
|
||||||
package duration
|
package duration
|
||||||
|
|
||||||
import (
|
import (
|
||||||
fmt "fmt"
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
proto "github.com/golang/protobuf/proto"
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
math "math"
|
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||||||
|
reflect "reflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Symbols defined in public import of google/protobuf/duration.proto.
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
type Duration = durationpb.Duration
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
// A Duration represents a signed, fixed-length span of time represented
|
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
|
||||||
// as a count of seconds and fractions of seconds at nanosecond
|
|
||||||
// resolution. It is independent of any calendar and concepts like "day"
|
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
|
||||||
// or "month". It is related to Timestamp in that the difference between
|
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||||
// two Timestamp values is a Duration and it can be added or subtracted
|
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||||
// from a Timestamp. Range is approximately +-10,000 years.
|
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
|
||||||
//
|
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||||
// # Examples
|
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
|
||||||
//
|
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||||
//
|
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||||
// Timestamp start = ...;
|
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
|
||||||
// Timestamp end = ...;
|
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
// Duration duration = ...;
|
|
||||||
//
|
|
||||||
// duration.seconds = end.seconds - start.seconds;
|
|
||||||
// duration.nanos = end.nanos - start.nanos;
|
|
||||||
//
|
|
||||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
|
||||||
// duration.seconds += 1;
|
|
||||||
// duration.nanos -= 1000000000;
|
|
||||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
|
||||||
// duration.seconds -= 1;
|
|
||||||
// duration.nanos += 1000000000;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
|
||||||
//
|
|
||||||
// Timestamp start = ...;
|
|
||||||
// Duration duration = ...;
|
|
||||||
// Timestamp end = ...;
|
|
||||||
//
|
|
||||||
// end.seconds = start.seconds + duration.seconds;
|
|
||||||
// end.nanos = start.nanos + duration.nanos;
|
|
||||||
//
|
|
||||||
// if (end.nanos < 0) {
|
|
||||||
// end.seconds -= 1;
|
|
||||||
// end.nanos += 1000000000;
|
|
||||||
// } else if (end.nanos >= 1000000000) {
|
|
||||||
// end.seconds += 1;
|
|
||||||
// end.nanos -= 1000000000;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
|
||||||
//
|
|
||||||
// td = datetime.timedelta(days=3, minutes=10)
|
|
||||||
// duration = Duration()
|
|
||||||
// duration.FromTimedelta(td)
|
|
||||||
//
|
|
||||||
// # JSON Mapping
|
|
||||||
//
|
|
||||||
// In JSON format, the Duration type is encoded as a string rather than an
|
|
||||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
|
||||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
|
||||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
|
||||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
|
||||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
|
||||||
// microsecond should be expressed in JSON format as "3.000001s".
|
|
||||||
//
|
|
||||||
//
|
|
||||||
type Duration struct {
|
|
||||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
|
||||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
|
||||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
|
||||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
|
||||||
// Signed fractions of a second at nanosecond resolution of the span
|
|
||||||
// of time. Durations less than one second are represented with a 0
|
|
||||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
|
||||||
// of one second or more, a non-zero value for the `nanos` field must be
|
|
||||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
|
||||||
// to +999,999,999 inclusive.
|
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Duration) Reset() { *m = Duration{} }
|
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
|
||||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
|
||||||
func (*Duration) ProtoMessage() {}
|
0, // [0:0] is the sub-list for method output_type
|
||||||
func (*Duration) Descriptor() ([]byte, []int) {
|
0, // [0:0] is the sub-list for method input_type
|
||||||
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
|
0, // [0:0] is the sub-list for extension type_name
|
||||||
|
0, // [0:0] is the sub-list for extension extendee
|
||||||
|
0, // [0:0] is the sub-list for field type_name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
|
||||||
|
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
|
||||||
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
|
||||||
return xxx_messageInfo_Duration.Unmarshal(m, b)
|
return
|
||||||
}
|
|
||||||
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Duration) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Duration.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Duration) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Duration.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Duration) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Duration.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Duration) GetSeconds() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Seconds
|
|
||||||
}
|
}
|
||||||
return 0
|
type x struct{}
|
||||||
}
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
func (m *Duration) GetNanos() int32 {
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
if m != nil {
|
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
|
||||||
return m.Nanos
|
NumEnums: 0,
|
||||||
}
|
NumMessages: 0,
|
||||||
return 0
|
NumExtensions: 0,
|
||||||
}
|
NumServices: 0,
|
||||||
|
},
|
||||||
func init() {
|
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
|
||||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
|
||||||
}
|
}.Build()
|
||||||
|
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
|
||||||
func init() {
|
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
|
||||||
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5)
|
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
|
||||||
}
|
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
|
||||||
|
|
||||||
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
|
|
||||||
// 190 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
|
||||||
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
|
|
||||||
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
|
|
||||||
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
|
|
||||||
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
|
|
||||||
0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
|
|
||||||
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
|
|
||||||
0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
|
|
||||||
0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
|
|
||||||
0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
|
|
||||||
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
|
|
||||||
}
|
}
|
||||||
|
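As a quick illustration of the JSON mapping described in the comment above (a string ending in "s", not an object): 3 seconds and 1 nanosecond map to "3.000000001s". time.Duration's String method happens to print the same decimal-seconds form after conversion, which this sketch relies on:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	d, err := ptypes.Duration(&durpb.Duration{Seconds: 3, Nanos: 1})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.String()) // 3.000000001s
}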
|||||||
116
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
generated
vendored
@@ -1,116 +0,0 @@
|
|||||||
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
|
|
||||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
|
||||||
option cc_enable_arenas = true;
|
|
||||||
option go_package = "github.com/golang/protobuf/ptypes/duration";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "DurationProto";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
|
|
||||||
// A Duration represents a signed, fixed-length span of time represented
|
|
||||||
// as a count of seconds and fractions of seconds at nanosecond
|
|
||||||
// resolution. It is independent of any calendar and concepts like "day"
|
|
||||||
// or "month". It is related to Timestamp in that the difference between
|
|
||||||
// two Timestamp values is a Duration and it can be added or subtracted
|
|
||||||
// from a Timestamp. Range is approximately +-10,000 years.
|
|
||||||
//
|
|
||||||
// # Examples
|
|
||||||
//
|
|
||||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
|
||||||
//
|
|
||||||
// Timestamp start = ...;
|
|
||||||
// Timestamp end = ...;
|
|
||||||
// Duration duration = ...;
|
|
||||||
//
|
|
||||||
// duration.seconds = end.seconds - start.seconds;
|
|
||||||
// duration.nanos = end.nanos - start.nanos;
|
|
||||||
//
|
|
||||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
|
||||||
// duration.seconds += 1;
|
|
||||||
// duration.nanos -= 1000000000;
|
|
||||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
|
||||||
// duration.seconds -= 1;
|
|
||||||
// duration.nanos += 1000000000;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
|
||||||
//
|
|
||||||
// Timestamp start = ...;
|
|
||||||
// Duration duration = ...;
|
|
||||||
// Timestamp end = ...;
|
|
||||||
//
|
|
||||||
// end.seconds = start.seconds + duration.seconds;
|
|
||||||
// end.nanos = start.nanos + duration.nanos;
|
|
||||||
//
|
|
||||||
// if (end.nanos < 0) {
|
|
||||||
// end.seconds -= 1;
|
|
||||||
// end.nanos += 1000000000;
|
|
||||||
// } else if (end.nanos >= 1000000000) {
|
|
||||||
// end.seconds += 1;
|
|
||||||
// end.nanos -= 1000000000;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
|
||||||
//
|
|
||||||
// td = datetime.timedelta(days=3, minutes=10)
|
|
||||||
// duration = Duration()
|
|
||||||
// duration.FromTimedelta(td)
|
|
||||||
//
|
|
||||||
// # JSON Mapping
|
|
||||||
//
|
|
||||||
// In JSON format, the Duration type is encoded as a string rather than an
|
|
||||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
|
||||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
|
||||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
|
||||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
|
||||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
|
||||||
// microsecond should be expressed in JSON format as "3.000001s".
|
|
||||||
//
|
|
||||||
//
|
|
||||||
message Duration {
|
|
||||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
|
||||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
|
||||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
|
||||||
int64 seconds = 1;
|
|
||||||
|
|
||||||
// Signed fractions of a second at nanosecond resolution of the span
|
|
||||||
// of time. Durations less than one second are represented with a 0
|
|
||||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
|
||||||
// of one second or more, a non-zero value for the `nanos` field must be
|
|
||||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
|
||||||
// to +999,999,999 inclusive.
|
|
||||||
int32 nanos = 2;
|
|
||||||
}
|
|
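Example 1 above (computing a Duration from two Timestamps, then normalizing so seconds and nanos agree in sign) translates directly to Go. The helper below is purely illustrative and not generated from duration.proto:

package main

import "fmt"

// durationBetween subtracts field-by-field and then borrows one second
// when seconds and nanos end up with different signs.
func durationBetween(startSec int64, startNanos int32, endSec int64, endNanos int32) (int64, int32) {
	seconds := endSec - startSec
	nanos := endNanos - startNanos
	if seconds < 0 && nanos > 0 {
		seconds++
		nanos -= 1000000000
	} else if seconds > 0 && nanos < 0 {
		seconds--
		nanos += 1000000000
	}
	return seconds, nanos
}

func main() {
	// 12.1s - 10.9s = 1.2s
	fmt.Println(durationBetween(10, 900000000, 12, 100000000)) // 1 200000000
}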
||||||
159
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
@@ -1,46 +1,18 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
// https://github.com/golang/protobuf
|
// Use of this source code is governed by a BSD-style
|
||||||
//
|
// license that can be found in the LICENSE file.
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package ptypes
|
package ptypes
|
||||||
|
|
||||||
// This file implements operations on google.protobuf.Timestamp.
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
tspb "github.com/golang/protobuf/ptypes/timestamp"
|
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Range of google.protobuf.Timestamp as specified in timestamp.proto.
|
||||||
const (
|
const (
|
||||||
// Seconds field of the earliest valid Timestamp.
|
// Seconds field of the earliest valid Timestamp.
|
||||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||||
@@ -50,17 +22,71 @@ const (
|
|||||||
maxValidSeconds = 253402300800
|
maxValidSeconds = 253402300800
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Timestamp converts a timestamppb.Timestamp to a time.Time.
|
||||||
|
// It returns an error if the argument is invalid.
|
||||||
|
//
|
||||||
|
// Unlike most Go functions, if Timestamp returns an error, the first return
|
||||||
|
// value is not the zero time.Time. Instead, it is the value obtained from the
|
||||||
|
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||||
|
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||||
|
// do map to valid time.Times.
|
||||||
|
//
|
||||||
|
// A nil Timestamp returns an error. The first return value in that case is
|
||||||
|
// undefined.
|
||||||
|
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
|
||||||
|
// Don't return the zero value on error, because it corresponds to a valid
|
||||||
|
// timestamp. Instead return whatever time.Unix gives us.
|
||||||
|
var t time.Time
|
||||||
|
if ts == nil {
|
||||||
|
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||||
|
} else {
|
||||||
|
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||||
|
}
|
||||||
|
return t, validateTimestamp(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
||||||
|
func TimestampNow() *timestamppb.Timestamp {
|
||||||
|
ts, err := TimestampProto(time.Now())
|
||||||
|
if err != nil {
|
||||||
|
panic("ptypes: time.Now() out of Timestamp range")
|
||||||
|
}
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||||
|
// It returns an error if the resulting Timestamp is invalid.
|
||||||
|
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
|
||||||
|
ts := &timestamppb.Timestamp{
|
||||||
|
Seconds: t.Unix(),
|
||||||
|
Nanos: int32(t.Nanosecond()),
|
||||||
|
}
|
||||||
|
if err := validateTimestamp(ts); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampString returns the RFC 3339 string for valid Timestamps.
|
||||||
|
// For invalid Timestamps, it returns an error message in parentheses.
|
||||||
|
func TimestampString(ts *timestamppb.Timestamp) string {
|
||||||
|
t, err := Timestamp(ts)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("(%v)", err)
|
||||||
|
}
|
||||||
|
return t.Format(time.RFC3339Nano)
|
||||||
|
}
|
||||||
|
|
||||||
// validateTimestamp determines whether a Timestamp is valid.
|
// validateTimestamp determines whether a Timestamp is valid.
|
||||||
// A valid timestamp represents a time in the range
|
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
|
||||||
// [0001-01-01, 10000-01-01) and has a Nanos field
|
// and has a Nanos field in the range [0, 1e9).
|
||||||
// in the range [0, 1e9).
|
|
||||||
//
|
//
|
||||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||||
// Otherwise, it returns an error that describes
|
// Otherwise, it returns an error that describes the problem.
|
||||||
// the problem.
|
|
||||||
//
|
//
|
||||||
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
// Every valid Timestamp can be represented by a time.Time,
|
||||||
func validateTimestamp(ts *tspb.Timestamp) error {
|
// but the converse is not true.
|
||||||
|
func validateTimestamp(ts *timestamppb.Timestamp) error {
|
||||||
if ts == nil {
|
if ts == nil {
|
||||||
return errors.New("timestamp: nil Timestamp")
|
return errors.New("timestamp: nil Timestamp")
|
||||||
}
|
}
|
||||||
@@ -75,58 +101,3 @@ func validateTimestamp(ts *tspb.Timestamp) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
|
|
||||||
// It returns an error if the argument is invalid.
|
|
||||||
//
|
|
||||||
// Unlike most Go functions, if Timestamp returns an error, the first return value
|
|
||||||
// is not the zero time.Time. Instead, it is the value obtained from the
|
|
||||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
|
||||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
|
||||||
// do map to valid time.Times.
|
|
||||||
//
|
|
||||||
// A nil Timestamp returns an error. The first return value in that case is
|
|
||||||
// undefined.
|
|
||||||
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
|
|
||||||
// Don't return the zero value on error, because it corresponds to a valid
|
|
||||||
// timestamp. Instead return whatever time.Unix gives us.
|
|
||||||
var t time.Time
|
|
||||||
if ts == nil {
|
|
||||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
|
||||||
} else {
|
|
||||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
|
||||||
}
|
|
||||||
return t, validateTimestamp(ts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
|
||||||
func TimestampNow() *tspb.Timestamp {
|
|
||||||
ts, err := TimestampProto(time.Now())
|
|
||||||
if err != nil {
|
|
||||||
panic("ptypes: time.Now() out of Timestamp range")
|
|
||||||
}
|
|
||||||
return ts
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
|
||||||
// It returns an error if the resulting Timestamp is invalid.
|
|
||||||
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
|
|
||||||
ts := &tspb.Timestamp{
|
|
||||||
Seconds: t.Unix(),
|
|
||||||
Nanos: int32(t.Nanosecond()),
|
|
||||||
}
|
|
||||||
if err := validateTimestamp(ts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return ts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
|
|
||||||
// Timestamps, it returns an error message in parentheses.
|
|
||||||
func TimestampString(ts *tspb.Timestamp) string {
|
|
||||||
t, err := Timestamp(ts)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Sprintf("(%v)", err)
|
|
||||||
}
|
|
||||||
return t.Format(time.RFC3339Nano)
|
|
||||||
}
|
|
||||||
|
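A usage sketch for the timestamp conversions above, showing the round trip and the RFC 3339 string form:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Go -> proto: errors only when the time falls outside [0001-01-01, 10000-01-01).
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		log.Fatal(err)
	}

	// proto -> Go: on error the returned value is whatever time.Unix produced,
	// not the zero time.Time (see the comment on Timestamp above).
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.Format(time.RFC3339Nano)) // 2017-01-15T01:30:15.01Z
	fmt.Println(ptypes.TimestampString(ts)) // same string; an error message in parentheses for invalid input
}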
|||||||
219
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
@@ -1,185 +1,64 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/timestamp.proto
|
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||||
|
|
||||||
package timestamp
|
package timestamp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
fmt "fmt"
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
proto "github.com/golang/protobuf/proto"
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
math "math"
|
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
reflect "reflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
type Timestamp = timestamppb.Timestamp
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
// A Timestamp represents a point in time independent of any time zone or local
|
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
|
||||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
|
||||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
|
||||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||||
// Gregorian calendar backwards to year one.
|
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||||
//
|
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
|
||||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
|
||||||
// second table is needed for interpretation, using a [24-hour linear
|
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||||
// smear](https://developers.google.com/time/smear).
|
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
|
||||||
//
|
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
|
||||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||||
//
|
0x33,
|
||||||
// # Examples
|
|
||||||
//
|
|
||||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
|
||||||
//
|
|
||||||
// Timestamp timestamp;
|
|
||||||
// timestamp.set_seconds(time(NULL));
|
|
||||||
// timestamp.set_nanos(0);
|
|
||||||
//
|
|
||||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
|
||||||
//
|
|
||||||
// struct timeval tv;
|
|
||||||
// gettimeofday(&tv, NULL);
|
|
||||||
//
|
|
||||||
// Timestamp timestamp;
|
|
||||||
// timestamp.set_seconds(tv.tv_sec);
|
|
||||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
|
||||||
//
|
|
||||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
|
||||||
//
|
|
||||||
// FILETIME ft;
|
|
||||||
// GetSystemTimeAsFileTime(&ft);
|
|
||||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
|
||||||
//
|
|
||||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
|
||||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
|
||||||
// Timestamp timestamp;
|
|
||||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
|
||||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
|
||||||
//
|
|
||||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
|
||||||
//
|
|
||||||
// long millis = System.currentTimeMillis();
|
|
||||||
//
|
|
||||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
|
||||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Example 5: Compute Timestamp from current time in Python.
|
|
||||||
//
|
|
||||||
// timestamp = Timestamp()
|
|
||||||
// timestamp.GetCurrentTime()
|
|
||||||
//
|
|
||||||
// # JSON Mapping
|
|
||||||
//
|
|
||||||
// In JSON format, the Timestamp type is encoded as a string in the
|
|
||||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
|
||||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
|
||||||
// where {year} is always expressed using four digits while {month}, {day},
|
|
||||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
|
||||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
|
||||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
|
||||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
|
||||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
|
||||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
|
||||||
//
|
|
||||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
|
||||||
// 01:30 UTC on January 15, 2017.
|
|
||||||
//
|
|
||||||
// In JavaScript, one can convert a Date object to this format using the
|
|
||||||
// standard
|
|
||||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
|
||||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
|
||||||
// to this format using
|
|
||||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
|
||||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
|
||||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
|
||||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
|
||||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
type Timestamp struct {
|
|
||||||
// Represents seconds of UTC time since Unix epoch
|
|
||||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
|
||||||
// 9999-12-31T23:59:59Z inclusive.
|
|
||||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
|
||||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
|
||||||
// second values with fractions must still have non-negative nanos values
|
|
||||||
// that count forward in time. Must be from 0 to 999,999,999
|
|
||||||
// inclusive.
|
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
|
||||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
|
||||||
func (*Timestamp) ProtoMessage() {}
|
0, // [0:0] is the sub-list for method output_type
|
||||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
0, // [0:0] is the sub-list for method input_type
|
||||||
return fileDescriptor_292007bbfe81227e, []int{0}
|
0, // [0:0] is the sub-list for extension type_name
|
||||||
|
0, // [0:0] is the sub-list for extension extendee
|
||||||
|
0, // [0:0] is the sub-list for field type_name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
|
||||||
|
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
|
||||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
|
||||||
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
|
return
|
||||||
}
|
|
||||||
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Timestamp) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Timestamp.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Timestamp) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Timestamp.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Timestamp) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Timestamp.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Timestamp) GetSeconds() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Seconds
|
|
||||||
}
|
}
|
||||||
return 0
|
type x struct{}
|
||||||
}
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
func (m *Timestamp) GetNanos() int32 {
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
if m != nil {
|
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
|
||||||
return m.Nanos
|
NumEnums: 0,
|
||||||
}
|
NumMessages: 0,
|
||||||
return 0
|
NumExtensions: 0,
|
||||||
}
|
NumServices: 0,
|
||||||
|
},
|
||||||
func init() {
|
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
|
||||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
|
||||||
}
|
}.Build()
|
||||||
|
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
|
||||||
func init() {
|
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
|
||||||
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e)
|
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
|
||||||
}
|
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
|
||||||
|
|
||||||
var fileDescriptor_292007bbfe81227e = []byte{
|
|
||||||
// 191 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
|
||||||
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
|
|
||||||
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
|
|
||||||
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
|
|
||||||
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
|
|
||||||
0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
|
|
||||||
0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
|
|
||||||
0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
|
|
||||||
0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
|
|
||||||
0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
|
|
||||||
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
|
|
||||||
}
|
}
|
||||||
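The doc comment above walks through building a Timestamp from POSIX, Win32, Java, and Python clock sources. A minimal Go sketch of the same conversion, assuming the ptypes helpers that ship with this vendored module (illustrative only, not part of the diff):

```go
package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/ptypes"
)

func main() {
    // Equivalent of Example 1: count seconds and nanos from the Unix epoch.
    ts, err := ptypes.TimestampProto(time.Now())
    if err != nil {
        panic(err)
    }
    fmt.Println(ts.Seconds, ts.Nanos)

    // Round-trip back to time.Time; formatting in UTC gives the RFC 3339
    // string described under "JSON Mapping" above.
    t, err := ptypes.Timestamp(ts)
    if err != nil {
        panic(err)
    }
    fmt.Println(t.UTC().Format(time.RFC3339Nano))
}
```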
138 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto generated vendored
@@ -1,138 +0,0 @@
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
|
|
||||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
|
||||||
option cc_enable_arenas = true;
|
|
||||||
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "TimestampProto";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
|
|
||||||
// A Timestamp represents a point in time independent of any time zone or local
|
|
||||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
|
||||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
|
||||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
|
||||||
// Gregorian calendar backwards to year one.
|
|
||||||
//
|
|
||||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
|
||||||
// second table is needed for interpretation, using a [24-hour linear
|
|
||||||
// smear](https://developers.google.com/time/smear).
|
|
||||||
//
|
|
||||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
|
||||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
|
||||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
|
||||||
//
|
|
||||||
// # Examples
|
|
||||||
//
|
|
||||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
|
||||||
//
|
|
||||||
// Timestamp timestamp;
|
|
||||||
// timestamp.set_seconds(time(NULL));
|
|
||||||
// timestamp.set_nanos(0);
|
|
||||||
//
|
|
||||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
|
||||||
//
|
|
||||||
// struct timeval tv;
|
|
||||||
// gettimeofday(&tv, NULL);
|
|
||||||
//
|
|
||||||
// Timestamp timestamp;
|
|
||||||
// timestamp.set_seconds(tv.tv_sec);
|
|
||||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
|
||||||
//
|
|
||||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
|
||||||
//
|
|
||||||
// FILETIME ft;
|
|
||||||
// GetSystemTimeAsFileTime(&ft);
|
|
||||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
|
||||||
//
|
|
||||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
|
||||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
|
||||||
// Timestamp timestamp;
|
|
||||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
|
||||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
|
||||||
//
|
|
||||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
|
||||||
//
|
|
||||||
// long millis = System.currentTimeMillis();
|
|
||||||
//
|
|
||||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
|
||||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Example 5: Compute Timestamp from current time in Python.
|
|
||||||
//
|
|
||||||
// timestamp = Timestamp()
|
|
||||||
// timestamp.GetCurrentTime()
|
|
||||||
//
|
|
||||||
// # JSON Mapping
|
|
||||||
//
|
|
||||||
// In JSON format, the Timestamp type is encoded as a string in the
|
|
||||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
|
||||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
|
||||||
// where {year} is always expressed using four digits while {month}, {day},
|
|
||||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
|
||||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
|
||||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
|
||||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
|
||||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
|
||||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
|
||||||
//
|
|
||||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
|
||||||
// 01:30 UTC on January 15, 2017.
|
|
||||||
//
|
|
||||||
// In JavaScript, one can convert a Date object to this format using the
|
|
||||||
// standard
|
|
||||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
|
||||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
|
||||||
// to this format using
|
|
||||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
|
||||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
|
||||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
|
||||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
|
||||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
message Timestamp {
|
|
||||||
// Represents seconds of UTC time since Unix epoch
|
|
||||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
|
||||||
// 9999-12-31T23:59:59Z inclusive.
|
|
||||||
int64 seconds = 1;
|
|
||||||
|
|
||||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
|
||||||
// second values with fractions must still have non-negative nanos values
|
|
||||||
// that count forward in time. Must be from 0 to 999,999,999
|
|
||||||
// inclusive.
|
|
||||||
int32 nanos = 2;
|
|
||||||
}
|
|
||||||
504 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go generated vendored
@@ -1,463 +1,71 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: google/protobuf/wrappers.proto
|
// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
|
||||||
|
|
||||||
package wrappers
|
package wrappers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
fmt "fmt"
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
proto "github.com/golang/protobuf/proto"
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
math "math"
|
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
|
||||||
|
reflect "reflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Symbols defined in public import of google/protobuf/wrappers.proto.
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
type DoubleValue = wrapperspb.DoubleValue
|
||||||
// is compatible with the proto package it is being compiled against.
|
type FloatValue = wrapperspb.FloatValue
|
||||||
// A compilation error at this line likely means your copy of the
|
type Int64Value = wrapperspb.Int64Value
|
||||||
// proto package needs to be updated.
|
type UInt64Value = wrapperspb.UInt64Value
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
type Int32Value = wrapperspb.Int32Value
|
||||||
|
type UInt32Value = wrapperspb.UInt32Value
|
||||||
|
type BoolValue = wrapperspb.BoolValue
|
||||||
|
type StringValue = wrapperspb.StringValue
|
||||||
|
type BytesValue = wrapperspb.BytesValue
|
||||||
|
|
||||||
// Wrapper message for `double`.
|
var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
|
||||||
//
|
|
||||||
// The JSON representation for `DoubleValue` is JSON number.
|
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
|
||||||
type DoubleValue struct {
|
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||||
// The double value.
|
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||||
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
|
0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||||
XXX_unrecognized []byte `json:"-"`
|
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
|
||||||
XXX_sizecache int32 `json:"-"`
|
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||||
|
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||||
|
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||||
|
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
|
||||||
|
0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *DoubleValue) Reset() { *m = DoubleValue{} }
|
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
|
||||||
func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
|
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
|
||||||
func (*DoubleValue) ProtoMessage() {}
|
0, // [0:0] is the sub-list for method output_type
|
||||||
func (*DoubleValue) Descriptor() ([]byte, []int) {
|
0, // [0:0] is the sub-list for method input_type
|
||||||
return fileDescriptor_5377b62bda767935, []int{0}
|
0, // [0:0] is the sub-list for extension type_name
|
||||||
|
0, // [0:0] is the sub-list for extension extendee
|
||||||
|
0, // [0:0] is the sub-list for field type_name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
|
func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
|
||||||
|
func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
|
||||||
func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
|
if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
|
||||||
return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
|
return
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_DoubleValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_DoubleValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_DoubleValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *DoubleValue) GetValue() float64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
}
|
||||||
return 0
|
type x struct{}
|
||||||
}
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
// Wrapper message for `float`.
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
//
|
RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
|
||||||
// The JSON representation for `FloatValue` is JSON number.
|
NumEnums: 0,
|
||||||
type FloatValue struct {
|
NumMessages: 0,
|
||||||
// The float value.
|
NumExtensions: 0,
|
||||||
Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
|
NumServices: 0,
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
},
|
||||||
XXX_unrecognized []byte `json:"-"`
|
GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
|
||||||
XXX_sizecache int32 `json:"-"`
|
DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
|
||||||
}
|
}.Build()
|
||||||
|
File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
|
||||||
func (m *FloatValue) Reset() { *m = FloatValue{} }
|
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
|
||||||
func (m *FloatValue) String() string { return proto.CompactTextString(m) }
|
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
|
||||||
func (*FloatValue) ProtoMessage() {}
|
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
|
||||||
func (*FloatValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
|
|
||||||
|
|
||||||
func (m *FloatValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_FloatValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_FloatValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_FloatValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_FloatValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_FloatValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *FloatValue) GetValue() float32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `int64`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Int64Value` is JSON string.
|
|
||||||
type Int64Value struct {
|
|
||||||
// The int64 value.
|
|
||||||
Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Int64Value) Reset() { *m = Int64Value{} }
|
|
||||||
func (m *Int64Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Int64Value) ProtoMessage() {}
|
|
||||||
func (*Int64Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
|
|
||||||
|
|
||||||
func (m *Int64Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Int64Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Int64Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Int64Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Int64Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Int64Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Int64Value) GetValue() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `uint64`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `UInt64Value` is JSON string.
|
|
||||||
type UInt64Value struct {
|
|
||||||
// The uint64 value.
|
|
||||||
Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UInt64Value) Reset() { *m = UInt64Value{} }
|
|
||||||
func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*UInt64Value) ProtoMessage() {}
|
|
||||||
func (*UInt64Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
|
|
||||||
|
|
||||||
func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_UInt64Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_UInt64Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_UInt64Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *UInt64Value) GetValue() uint64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `int32`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Int32Value` is JSON number.
|
|
||||||
type Int32Value struct {
|
|
||||||
// The int32 value.
|
|
||||||
Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Int32Value) Reset() { *m = Int32Value{} }
|
|
||||||
func (m *Int32Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Int32Value) ProtoMessage() {}
|
|
||||||
func (*Int32Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{4}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
|
|
||||||
|
|
||||||
func (m *Int32Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Int32Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Int32Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Int32Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Int32Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Int32Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Int32Value) GetValue() int32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `uint32`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `UInt32Value` is JSON number.
|
|
||||||
type UInt32Value struct {
|
|
||||||
// The uint32 value.
|
|
||||||
Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UInt32Value) Reset() { *m = UInt32Value{} }
|
|
||||||
func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*UInt32Value) ProtoMessage() {}
|
|
||||||
func (*UInt32Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{5}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
|
|
||||||
|
|
||||||
func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_UInt32Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_UInt32Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_UInt32Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *UInt32Value) GetValue() uint32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `bool`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `BoolValue` is JSON `true` and `false`.
|
|
||||||
type BoolValue struct {
|
|
||||||
// The bool value.
|
|
||||||
Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *BoolValue) Reset() { *m = BoolValue{} }
|
|
||||||
func (m *BoolValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*BoolValue) ProtoMessage() {}
|
|
||||||
func (*BoolValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{6}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
|
|
||||||
|
|
||||||
func (m *BoolValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_BoolValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_BoolValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_BoolValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_BoolValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_BoolValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *BoolValue) GetValue() bool {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `string`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `StringValue` is JSON string.
|
|
||||||
type StringValue struct {
|
|
||||||
// The string value.
|
|
||||||
Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StringValue) Reset() { *m = StringValue{} }
|
|
||||||
func (m *StringValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*StringValue) ProtoMessage() {}
|
|
||||||
func (*StringValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{7}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
|
|
||||||
|
|
||||||
func (m *StringValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_StringValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_StringValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_StringValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_StringValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_StringValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *StringValue) GetValue() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `bytes`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `BytesValue` is JSON string.
|
|
||||||
type BytesValue struct {
|
|
||||||
// The bytes value.
|
|
||||||
Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *BytesValue) Reset() { *m = BytesValue{} }
|
|
||||||
func (m *BytesValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*BytesValue) ProtoMessage() {}
|
|
||||||
func (*BytesValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{8}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
|
|
||||||
|
|
||||||
func (m *BytesValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_BytesValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *BytesValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_BytesValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *BytesValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_BytesValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *BytesValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_BytesValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_BytesValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *BytesValue) GetValue() []byte {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
|
|
||||||
proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
|
|
||||||
proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
|
|
||||||
proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
|
|
||||||
proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
|
|
||||||
proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
|
|
||||||
proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
|
|
||||||
proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
|
|
||||||
proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_5377b62bda767935 = []byte{
|
|
||||||
// 259 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
|
||||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
|
|
||||||
0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
|
|
||||||
0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
|
|
||||||
0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
|
|
||||||
0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
|
|
||||||
0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
|
|
||||||
0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
|
|
||||||
0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
|
|
||||||
0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
|
|
||||||
0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
|
|
||||||
0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
|
|
||||||
0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
|
|
||||||
0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
|
|
||||||
0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
|
|
||||||
0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
|
|
||||||
0x01, 0x00, 0x00,
|
|
||||||
}
|
}
|
||||||
123 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto generated vendored
@@ -1,123 +0,0 @@
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Wrappers for primitive (non-message) types. These types are useful
|
|
||||||
// for embedding primitives in the `google.protobuf.Any` type and for places
|
|
||||||
// where we need to distinguish between the absence of a primitive
|
|
||||||
// typed field and its default value.
|
|
||||||
//
|
|
||||||
// These wrappers have no meaningful use within repeated fields as they lack
|
|
||||||
// the ability to detect presence on individual elements.
|
|
||||||
// These wrappers have no meaningful use within a map or a oneof since
|
|
||||||
// individual entries of a map or fields of a oneof can already detect presence.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
|
|
||||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
|
||||||
option cc_enable_arenas = true;
|
|
||||||
option go_package = "github.com/golang/protobuf/ptypes/wrappers";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "WrappersProto";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
|
|
||||||
// Wrapper message for `double`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `DoubleValue` is JSON number.
|
|
||||||
message DoubleValue {
|
|
||||||
// The double value.
|
|
||||||
double value = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `float`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `FloatValue` is JSON number.
|
|
||||||
message FloatValue {
|
|
||||||
// The float value.
|
|
||||||
float value = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `int64`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Int64Value` is JSON string.
|
|
||||||
message Int64Value {
|
|
||||||
// The int64 value.
|
|
||||||
int64 value = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `uint64`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `UInt64Value` is JSON string.
|
|
||||||
message UInt64Value {
|
|
||||||
// The uint64 value.
|
|
||||||
uint64 value = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `int32`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Int32Value` is JSON number.
|
|
||||||
message Int32Value {
|
|
||||||
// The int32 value.
|
|
||||||
int32 value = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `uint32`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `UInt32Value` is JSON number.
|
|
||||||
message UInt32Value {
|
|
||||||
// The uint32 value.
|
|
||||||
uint32 value = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `bool`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `BoolValue` is JSON `true` and `false`.
|
|
||||||
message BoolValue {
|
|
||||||
// The bool value.
|
|
||||||
bool value = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `string`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `StringValue` is JSON string.
|
|
||||||
message StringValue {
|
|
||||||
// The string value.
|
|
||||||
string value = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `bytes`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `BytesValue` is JSON string.
|
|
||||||
message BytesValue {
|
|
||||||
// The bytes value.
|
|
||||||
bytes value = 1;
|
|
||||||
}
|
|
||||||
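The wrapper messages above exist so a caller can tell an unset primitive apart from its zero value. A short sketch using the vendored wrappers package, assuming a hypothetical message field typed as *wrappers.StringValue (illustrative only):

```go
package main

import (
    "fmt"

    "github.com/golang/protobuf/ptypes/wrappers"
)

// describe shows the three states a *wrappers.StringValue field can express:
// nil (unset), present but empty, and present with a value.
func describe(v *wrappers.StringValue) string {
    if v == nil {
        return "unset"
    }
    return fmt.Sprintf("set to %q", v.Value)
}

func main() {
    fmt.Println(describe(nil))                               // unset
    fmt.Println(describe(&wrappers.StringValue{}))           // set to ""
    fmt.Println(describe(&wrappers.StringValue{Value: "x"})) // set to "x"
}
```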
4455 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go generated vendored
File diff suppressed because it is too large
78 vendor/github.com/googleapis/gnostic/compiler/reader.go generated vendored
@@ -17,13 +17,14 @@ package compiler
 import (
     "errors"
     "fmt"
-    "gopkg.in/yaml.v2"
     "io/ioutil"
     "log"
     "net/http"
     "net/url"
     "path/filepath"
     "strings"
+
+    yaml "gopkg.in/yaml.v2"
 )
 
 var fileCache map[string][]byte
@@ -31,6 +32,8 @@ var infoCache map[string]interface{}
 var count int64
 
 var verboseReader = false
+var fileCacheEnable = true
+var infoCacheEnable = true
 
 func initializeFileCache() {
     if fileCache == nil {
@@ -44,9 +47,63 @@ func initializeInfoCache() {
     }
 }
 
+func EnableFileCache() {
+    fileCacheEnable = true
+}
+
+func EnableInfoCache() {
+    infoCacheEnable = true
+}
+
+func DisableFileCache() {
+    fileCacheEnable = false
+}
+
+func DisableInfoCache() {
+    infoCacheEnable = false
+}
+
+func RemoveFromFileCache(fileurl string) {
+    if !fileCacheEnable {
+        return
+    }
+    initializeFileCache()
+    delete(fileCache, fileurl)
+}
+
+func RemoveFromInfoCache(filename string) {
+    if !infoCacheEnable {
+        return
+    }
+    initializeInfoCache()
+    delete(infoCache, filename)
+}
+
+func GetInfoCache() map[string]interface{} {
+    if infoCache == nil {
+        initializeInfoCache()
+    }
+    return infoCache
+}
+
+func ClearFileCache() {
+    fileCache = make(map[string][]byte, 0)
+}
+
+func ClearInfoCache() {
+    infoCache = make(map[string]interface{})
+}
+
+func ClearCaches() {
+    ClearFileCache()
+    ClearInfoCache()
+}
+
 // FetchFile gets a specified file from the local filesystem or a remote location.
 func FetchFile(fileurl string) ([]byte, error) {
+    var bytes []byte
     initializeFileCache()
+    if fileCacheEnable {
     bytes, ok := fileCache[fileurl]
     if ok {
         if verboseReader {
@@ -57,16 +114,17 @@ func FetchFile(fileurl string) ([]byte, error) {
         if verboseReader {
             log.Printf("Fetching %s", fileurl)
         }
+    }
     response, err := http.Get(fileurl)
     if err != nil {
         return nil, err
     }
+    defer response.Body.Close()
     if response.StatusCode != 200 {
         return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status))
     }
-    defer response.Body.Close()
     bytes, err = ioutil.ReadAll(response.Body)
-    if err == nil {
+    if fileCacheEnable && err == nil {
         fileCache[fileurl] = bytes
     }
     return bytes, err
@@ -95,6 +153,7 @@ func ReadBytesForFile(filename string) ([]byte, error) {
 // ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
 func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
     initializeInfoCache()
+    if infoCacheEnable {
     cachedInfo, ok := infoCache[filename]
     if ok {
         if verboseReader {
@@ -105,12 +164,13 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
         if verboseReader {
             log.Printf("Reading info for file %s", filename)
         }
+    }
     var info yaml.MapSlice
     err := yaml.Unmarshal(bytes, &info)
     if err != nil {
         return nil, err
     }
-    if len(filename) > 0 {
+    if infoCacheEnable && len(filename) > 0 {
         infoCache[filename] = info
     }
     return info, nil
@@ -119,7 +179,7 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
 // ReadInfoForRef reads a file and return the fragment needed to resolve a $ref.
 func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
     initializeInfoCache()
-    {
+    if infoCacheEnable {
     info, ok := infoCache[ref]
     if ok {
         if verboseReader {
@@ -127,16 +187,20 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
             }
             return info, nil
         }
-    }
     if verboseReader {
         log.Printf("Reading info for ref %s#%s", basefile, ref)
     }
+    }
     count = count + 1
     basedir, _ := filepath.Split(basefile)
     parts := strings.Split(ref, "#")
     var filename string
     if parts[0] != "" {
+        filename = parts[0]
+        if _, err := url.ParseRequestURI(parts[0]); err != nil {
+            // It is not an URL, so the file is local
         filename = basedir + parts[0]
+        }
     } else {
         filename = basefile
     }
@@ -170,6 +234,8 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
             }
         }
     }
+    if infoCacheEnable {
     infoCache[ref] = info
+    }
     return info, nil
 }
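The new Enable*/Disable*/Remove*/Clear* helpers above let callers bypass or evict gnostic's package-level file and info caches. A usage sketch against the vendored compiler package (the URL is a placeholder):

```go
package main

import (
    "log"

    "github.com/googleapis/gnostic/compiler"
)

func main() {
    // Opt out of caching entirely, e.g. when descriptions change on disk.
    compiler.DisableFileCache()
    compiler.DisableInfoCache()

    // Or keep caching but drop a single entry that is known to be stale.
    compiler.EnableFileCache()
    compiler.RemoveFromFileCache("https://example.com/openapi.yaml")

    data, err := compiler.FetchFile("https://example.com/openapi.yaml")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("fetched %d bytes", len(data))
}
```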
5 vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh generated vendored
@@ -1,5 +0,0 @@
-go get github.com/golang/protobuf/protoc-gen-go
-
-protoc \
-  --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto
-
204 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go generated vendored
@@ -1,24 +1,14 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: extension.proto
|
// source: extensions/extension.proto
|
||||||
|
|
||||||
/*
|
|
||||||
Package openapiextension_v1 is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
extension.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Version
|
|
||||||
ExtensionHandlerRequest
|
|
||||||
ExtensionHandlerResponse
|
|
||||||
Wrapper
|
|
||||||
*/
|
|
||||||
package openapiextension_v1
|
package openapiextension_v1
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
import (
|
||||||
import fmt "fmt"
|
fmt "fmt"
|
||||||
import math "math"
|
proto "github.com/golang/protobuf/proto"
|
||||||
import google_protobuf "github.com/golang/protobuf/ptypes/any"
|
any "github.com/golang/protobuf/ptypes/any"
|
||||||
|
math "math"
|
||||||
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
var _ = proto.Marshal
|
var _ = proto.Marshal
|
||||||
@@ -29,22 +19,45 @@ var _ = math.Inf
|
|||||||
// is compatible with the proto package it is being compiled against.
|
// is compatible with the proto package it is being compiled against.
|
||||||
// A compilation error at this line likely means your copy of the
|
// A compilation error at this line likely means your copy of the
|
||||||
// proto package needs to be updated.
|
// proto package needs to be updated.
|
||||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
// The version number of OpenAPI compiler.
|
// The version number of OpenAPI compiler.
|
||||||
type Version struct {
|
type Version struct {
|
||||||
Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
|
Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
|
||||||
Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
|
Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
|
||||||
Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
|
Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
|
||||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
||||||
// be empty for mainline stable releases.
|
// be empty for mainline stable releases.
|
||||||
Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
|
Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Version) Reset() { *m = Version{} }
|
func (m *Version) Reset() { *m = Version{} }
|
||||||
func (m *Version) String() string { return proto.CompactTextString(m) }
|
func (m *Version) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Version) ProtoMessage() {}
|
func (*Version) ProtoMessage() {}
|
||||||
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
func (*Version) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_661e47e790f76671, []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Version) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Version.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Version) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Version.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Version) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Version.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Version) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Version.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Version proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *Version) GetMajor() int32 {
|
func (m *Version) GetMajor() int32 {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
@@ -78,15 +91,38 @@ func (m *Version) GetSuffix() string {
 type ExtensionHandlerRequest struct {
 	// The OpenAPI descriptions that were explicitly listed on the command line.
 	// The specifications will appear in the order they are specified to gnostic.
-	Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"`
+	Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"`
 	// The version number of openapi compiler.
-	CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
+	CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }

 func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} }
 func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
 func (*ExtensionHandlerRequest) ProtoMessage() {}
-func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_661e47e790f76671, []int{1}
+}
+
+func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b)
+}
+func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic)
+}
+func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src)
+}
+func (m *ExtensionHandlerRequest) XXX_Size() int {
+	return xxx_messageInfo_ExtensionHandlerRequest.Size(m)
+}
+func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo
+
 func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
 	if m != nil {
@@ -105,7 +141,7 @@ func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
 // The extensions writes an encoded ExtensionHandlerResponse to stdout.
 type ExtensionHandlerResponse struct {
 	// true if the extension is handled by the extension handler; false otherwise
-	Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"`
+	Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"`
 	// Error message. If non-empty, the extension handling failed.
 	// The extension handler process should exit with status code zero
 	// even if it reports an error in this way.
@@ -115,15 +151,38 @@ type ExtensionHandlerResponse struct {
 	// itself -- such as the input Document being unparseable -- should be
 	// reported by writing a message to stderr and exiting with a non-zero
 	// status code.
-	Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"`
+	Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"`
 	// text output
-	Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
+	Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }

 func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} }
 func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
 func (*ExtensionHandlerResponse) ProtoMessage() {}
-func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_661e47e790f76671, []int{2}
+}
+
+func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b)
+}
+func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic)
+}
+func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src)
+}
+func (m *ExtensionHandlerResponse) XXX_Size() int {
+	return xxx_messageInfo_ExtensionHandlerResponse.Size(m)
+}
+func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo
+
 func (m *ExtensionHandlerResponse) GetHandled() bool {
 	if m != nil {
@@ -139,7 +198,7 @@ func (m *ExtensionHandlerResponse) GetError() []string {
 	return nil
 }

-func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any {
+func (m *ExtensionHandlerResponse) GetValue() *any.Any {
 	if m != nil {
 		return m.Value
 	}
@@ -148,17 +207,40 @@ func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any {

 type Wrapper struct {
 	// version of the OpenAPI specification in which this extension was written.
-	Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
+	Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
 	// Name of the extension
-	ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"`
+	ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"`
 	// Must be a valid yaml for the proto
-	Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"`
+	Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }

 func (m *Wrapper) Reset() { *m = Wrapper{} }
 func (m *Wrapper) String() string { return proto.CompactTextString(m) }
 func (*Wrapper) ProtoMessage() {}
-func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*Wrapper) Descriptor() ([]byte, []int) {
+	return fileDescriptor_661e47e790f76671, []int{3}
+}
+
+func (m *Wrapper) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Wrapper.Unmarshal(m, b)
+}
+func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic)
+}
+func (m *Wrapper) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Wrapper.Merge(m, src)
+}
+func (m *Wrapper) XXX_Size() int {
+	return xxx_messageInfo_Wrapper.Size(m)
+}
+func (m *Wrapper) XXX_DiscardUnknown() {
+	xxx_messageInfo_Wrapper.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Wrapper proto.InternalMessageInfo
+
 func (m *Wrapper) GetVersion() string {
 	if m != nil {
@@ -188,31 +270,31 @@ func init() {
 	proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
 }

-func init() { proto.RegisterFile("extension.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) }

-var fileDescriptor0 = []byte{
-	// 357 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40,
-	0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64,
-	0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46,
-	0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7,
-	0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22,
-	0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11,
-	0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18,
-	0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8,
-	0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c,
-	0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69,
-	0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef,
-	0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3,
-	0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87,
-	0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44,
-	0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80,
-	0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4,
-	0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54,
-	0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c,
-	0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e,
-	0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2,
-	0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa,
-	0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b,
-	0x80, 0x51, 0x02, 0x00, 0x00,
+var fileDescriptor_661e47e790f76671 = []byte{
+	// 362 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40,
+	0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50,
+	0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7,
+	0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85,
+	0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f,
+	0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71,
+	0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e,
+	0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d,
+	0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef,
+	0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35,
+	0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14,
+	0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39,
+	0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08,
+	0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81,
+	0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3,
+	0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8,
+	0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35,
+	0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0,
+	0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18,
+	0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09,
+	0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d,
+	0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00,
+	0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00,
 }
@@ -7105,15 +7105,15 @@ func (m *Any) ToRawInfo() interface{} {
 // ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
 func (m *ApiKeySecurity) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
-	}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "in", Value: m.In})
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
@@ -7129,9 +7129,11 @@ func (m *ApiKeySecurity) ToRawInfo() interface{} {
 // ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export.
 func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	if m == nil {
+		return info
 	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
@@ -7147,21 +7149,21 @@ func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} {
 // ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export.
 func (m *BodyParameter) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "in", Value: m.In})
 	if m.Required != false {
 		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
 	}
-	if m.Schema != nil {
-		info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
 	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
@@ -7175,6 +7177,9 @@ func (m *BodyParameter) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Contact suitable for JSON or YAML export.
 func (m *Contact) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7196,6 +7201,9 @@ func (m *Contact) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Default suitable for JSON or YAML export.
 func (m *Default) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
 			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
@@ -7208,6 +7216,9 @@ func (m *Default) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Definitions suitable for JSON or YAML export.
 func (m *Definitions) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
 			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
@@ -7220,12 +7231,13 @@ func (m *Definitions) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Document suitable for JSON or YAML export.
 func (m *Document) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Swagger != "" {
-		info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger})
-	}
-	if m.Info != nil {
-		info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()})
-	}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()})
 	// &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.Host != "" {
 		info = append(info, yaml.MapItem{Key: "host", Value: m.Host})
@@ -7242,9 +7254,8 @@ func (m *Document) ToRawInfo() interface{} {
 	if len(m.Produces) != 0 {
 		info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
 	}
-	if m.Paths != nil {
-		info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()})
 	// &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if m.Definitions != nil {
 		info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()})
@@ -7294,6 +7305,9 @@ func (m *Document) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Examples suitable for JSON or YAML export.
 func (m *Examples) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
 			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
@@ -7306,12 +7320,14 @@ func (m *Examples) ToRawInfo() interface{} {
 // ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export.
 func (m *ExternalDocs) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
-	if m.Url != "" {
-		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
 			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
@@ -7324,6 +7340,9 @@ func (m *ExternalDocs) ToRawInfo() interface{} {
 // ToRawInfo returns a description of FileSchema suitable for JSON or YAML export.
 func (m *FileSchema) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Format != "" {
 		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
 	}
@@ -7340,9 +7359,8 @@ func (m *FileSchema) ToRawInfo() interface{} {
 	if len(m.Required) != 0 {
 		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
 	}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
 	if m.ReadOnly != false {
 		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
 	}
@@ -7366,6 +7384,9 @@ func (m *FileSchema) ToRawInfo() interface{} {
 // ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
 func (m *FormDataParameterSubSchema) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Required != false {
 		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
 	}
@@ -7451,9 +7472,11 @@ func (m *FormDataParameterSubSchema) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Header suitable for JSON or YAML export.
 func (m *Header) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	if m == nil {
+		return info
 	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
 	if m.Format != "" {
 		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
 	}
@@ -7524,6 +7547,9 @@ func (m *Header) ToRawInfo() interface{} {
 // ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
 func (m *HeaderParameterSubSchema) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Required != false {
 		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
 	}
@@ -7606,6 +7632,9 @@ func (m *HeaderParameterSubSchema) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Headers suitable for JSON or YAML export.
 func (m *Headers) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
 			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
@@ -7618,12 +7647,13 @@ func (m *Headers) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Info suitable for JSON or YAML export.
 func (m *Info) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Title != "" {
-		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
-	}
-	if m.Version != "" {
-		info = append(info, yaml.MapItem{Key: "version", Value: m.Version})
-	}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "version", Value: m.Version})
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
@@ -7650,6 +7680,9 @@ func (m *Info) ToRawInfo() interface{} {
 // ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
 func (m *ItemsItem) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if len(m.Schema) != 0 {
 		items := make([]interface{}, 0)
 		for _, item := range m.Schema {
@@ -7664,9 +7697,11 @@ func (m *ItemsItem) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
|
// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
|
||||||
func (m *JsonReference) ToRawInfo() interface{} {
|
func (m *JsonReference) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
if m.XRef != "" {
|
if m == nil {
|
||||||
info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
|
return info
|
||||||
}
|
}
|
||||||
|
// always include this required field.
|
||||||
|
info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
|
||||||
if m.Description != "" {
|
if m.Description != "" {
|
||||||
info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
|
info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
|
||||||
}
|
}
|
||||||
@@ -7676,9 +7711,11 @@ func (m *JsonReference) ToRawInfo() interface{} {
 // ToRawInfo returns a description of License suitable for JSON or YAML export.
 func (m *License) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	if m == nil {
+		return info
 	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	if m.Url != "" {
 		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
 	}
@@ -7694,6 +7731,9 @@ func (m *License) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
 func (m *NamedAny) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7704,6 +7744,9 @@ func (m *NamedAny) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
 func (m *NamedHeader) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7714,6 +7757,9 @@ func (m *NamedHeader) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
 func (m *NamedParameter) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7724,6 +7770,9 @@ func (m *NamedParameter) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
 func (m *NamedPathItem) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7734,6 +7783,9 @@ func (m *NamedPathItem) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
 func (m *NamedResponse) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7744,6 +7796,9 @@ func (m *NamedResponse) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
 func (m *NamedResponseValue) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7754,6 +7809,9 @@ func (m *NamedResponseValue) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
 func (m *NamedSchema) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7764,6 +7822,9 @@ func (m *NamedSchema) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
 func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7774,6 +7835,9 @@ func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
 func (m *NamedString) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7786,6 +7850,9 @@ func (m *NamedString) ToRawInfo() interface{} {
 // ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
 func (m *NamedStringArray) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Name != "" {
 		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
 	}
@@ -7823,22 +7890,21 @@ func (m *NonBodyParameter) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export.
 func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Flow != "" {
-		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
-	}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
 	if m.Scopes != nil {
 		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
 	}
 	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.AuthorizationUrl != "" {
-		info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
-	}
-	if m.TokenUrl != "" {
-		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
@@ -7854,19 +7920,19 @@ func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export.
 func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Flow != "" {
-		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
-	}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
 	if m.Scopes != nil {
 		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
 	}
 	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.TokenUrl != "" {
-		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
@@ -7882,19 +7948,19 @@ func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export.
 func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Flow != "" {
-		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
-	}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
 	if m.Scopes != nil {
 		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
 	}
 	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.AuthorizationUrl != "" {
-		info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
@@ -7910,19 +7976,19 @@ func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export.
 func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Flow != "" {
-		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
-	}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
 	if m.Scopes != nil {
 		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
 	}
 	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.TokenUrl != "" {
-		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
 	if m.Description != "" {
 		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	}
@@ -7938,6 +8004,9 @@ func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export.
 func (m *Oauth2Scopes) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
 	return info
 }
@@ -7945,6 +8014,9 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Operation suitable for JSON or YAML export.
 func (m *Operation) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if len(m.Tags) != 0 {
 		info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags})
 	}
@@ -7975,9 +8047,8 @@ func (m *Operation) ToRawInfo() interface{} {
 		info = append(info, yaml.MapItem{Key: "parameters", Value: items})
 	}
 	// &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
-	if m.Responses != nil {
-		info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
-	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
 	// &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
 	if len(m.Schemes) != 0 {
 		info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
@@ -8022,6 +8093,9 @@ func (m *Parameter) ToRawInfo() interface{} {
 // ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export.
 func (m *ParameterDefinitions) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
 			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
@@ -8051,6 +8125,9 @@ func (m *ParametersItem) ToRawInfo() interface{} {
 // ToRawInfo returns a description of PathItem suitable for JSON or YAML export.
 func (m *PathItem) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.XRef != "" {
 		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
 	}
@@ -8102,9 +8179,11 @@ func (m *PathItem) ToRawInfo() interface{} {
 // ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export.
 func (m *PathParameterSubSchema) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Required != false {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	if m == nil {
+		return info
 	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
 	if m.In != "" {
 		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
 	}
@@ -8184,6 +8263,9 @@ func (m *PathParameterSubSchema) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Paths suitable for JSON or YAML export.
 func (m *Paths) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.VendorExtension != nil {
 		for _, item := range m.VendorExtension {
 			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
@@ -8202,6 +8284,9 @@ func (m *Paths) ToRawInfo() interface{} {
 // ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export.
 func (m *PrimitivesItems) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Type != "" {
 		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
 	}
@@ -8272,6 +8357,9 @@ func (m *PrimitivesItems) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Properties suitable for JSON or YAML export.
 func (m *Properties) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.AdditionalProperties != nil {
 		for _, item := range m.AdditionalProperties {
 			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
@@ -8284,6 +8372,9 @@ func (m *Properties) ToRawInfo() interface{} {
 // ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export.
 func (m *QueryParameterSubSchema) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
 	if m.Required != false {
 		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
 	}
@@ -8369,9 +8460,11 @@ func (m *QueryParameterSubSchema) ToRawInfo() interface{} {
 // ToRawInfo returns a description of Response suitable for JSON or YAML export.
 func (m *Response) ToRawInfo() interface{} {
 	info := yaml.MapSlice{}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	if m == nil {
+		return info
 	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
 	if m.Schema != nil {
 		info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
 	}
@@ -8396,6 +8489,9 @@ func (m *Response) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export.
|
// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export.
|
||||||
func (m *ResponseDefinitions) ToRawInfo() interface{} {
|
func (m *ResponseDefinitions) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
|
if m == nil {
|
||||||
|
return info
|
||||||
|
}
|
||||||
if m.AdditionalProperties != nil {
|
if m.AdditionalProperties != nil {
|
||||||
for _, item := range m.AdditionalProperties {
|
for _, item := range m.AdditionalProperties {
|
||||||
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
||||||
@@ -8425,6 +8521,9 @@ func (m *ResponseValue) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of Responses suitable for JSON or YAML export.
|
// ToRawInfo returns a description of Responses suitable for JSON or YAML export.
|
||||||
func (m *Responses) ToRawInfo() interface{} {
|
func (m *Responses) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
|
if m == nil {
|
||||||
|
return info
|
||||||
|
}
|
||||||
if m.ResponseCode != nil {
|
if m.ResponseCode != nil {
|
||||||
for _, item := range m.ResponseCode {
|
for _, item := range m.ResponseCode {
|
||||||
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
||||||
@@ -8443,6 +8542,9 @@ func (m *Responses) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of Schema suitable for JSON or YAML export.
|
// ToRawInfo returns a description of Schema suitable for JSON or YAML export.
|
||||||
func (m *Schema) ToRawInfo() interface{} {
|
func (m *Schema) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
|
if m == nil {
|
||||||
|
return info
|
||||||
|
}
|
||||||
if m.XRef != "" {
|
if m.XRef != "" {
|
||||||
info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
|
info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
|
||||||
}
|
}
|
||||||
@@ -8588,6 +8690,9 @@ func (m *SchemaItem) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
|
// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
|
||||||
func (m *SecurityDefinitions) ToRawInfo() interface{} {
|
func (m *SecurityDefinitions) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
|
if m == nil {
|
||||||
|
return info
|
||||||
|
}
|
||||||
if m.AdditionalProperties != nil {
|
if m.AdditionalProperties != nil {
|
||||||
for _, item := range m.AdditionalProperties {
|
for _, item := range m.AdditionalProperties {
|
||||||
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
||||||
@@ -8637,6 +8742,9 @@ func (m *SecurityDefinitionsItem) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
|
// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
|
||||||
func (m *SecurityRequirement) ToRawInfo() interface{} {
|
func (m *SecurityRequirement) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
|
if m == nil {
|
||||||
|
return info
|
||||||
|
}
|
||||||
if m.AdditionalProperties != nil {
|
if m.AdditionalProperties != nil {
|
||||||
for _, item := range m.AdditionalProperties {
|
for _, item := range m.AdditionalProperties {
|
||||||
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
||||||
@@ -8654,9 +8762,11 @@ func (m *StringArray) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of Tag suitable for JSON or YAML export.
|
// ToRawInfo returns a description of Tag suitable for JSON or YAML export.
|
||||||
func (m *Tag) ToRawInfo() interface{} {
|
func (m *Tag) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
if m.Name != "" {
|
if m == nil {
|
||||||
info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
|
return info
|
||||||
}
|
}
|
||||||
|
// always include this required field.
|
||||||
|
info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
|
||||||
if m.Description != "" {
|
if m.Description != "" {
|
||||||
info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
|
info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
|
||||||
}
|
}
|
||||||
@@ -8676,6 +8786,9 @@ func (m *Tag) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export.
|
// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export.
|
||||||
func (m *TypeItem) ToRawInfo() interface{} {
|
func (m *TypeItem) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
|
if m == nil {
|
||||||
|
return info
|
||||||
|
}
|
||||||
if len(m.Value) != 0 {
|
if len(m.Value) != 0 {
|
||||||
info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
|
info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
|
||||||
}
|
}
|
||||||
@@ -8685,6 +8798,9 @@ func (m *TypeItem) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export.
|
// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export.
|
||||||
func (m *VendorExtension) ToRawInfo() interface{} {
|
func (m *VendorExtension) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
|
if m == nil {
|
||||||
|
return info
|
||||||
|
}
|
||||||
if m.AdditionalProperties != nil {
|
if m.AdditionalProperties != nil {
|
||||||
for _, item := range m.AdditionalProperties {
|
for _, item := range m.AdditionalProperties {
|
||||||
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
|
||||||
@@ -8697,6 +8813,9 @@ func (m *VendorExtension) ToRawInfo() interface{} {
|
|||||||
// ToRawInfo returns a description of Xml suitable for JSON or YAML export.
|
// ToRawInfo returns a description of Xml suitable for JSON or YAML export.
|
||||||
func (m *Xml) ToRawInfo() interface{} {
|
func (m *Xml) ToRawInfo() interface{} {
|
||||||
info := yaml.MapSlice{}
|
info := yaml.MapSlice{}
|
||||||
|
if m == nil {
|
||||||
|
return info
|
||||||
|
}
|
||||||
if m.Name != "" {
|
if m.Name != "" {
|
||||||
info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
|
info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
|
||||||
}
|
}
|
||||||
5226
vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
generated
vendored
Normal file
5226
vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
18 vendor/github.com/json-iterator/go/README.md generated vendored
@@ -1,5 +1,5 @@
 [](https://sourcegraph.com/github.com/json-iterator/go?badge)
-[](http://godoc.org/github.com/json-iterator/go)
+[](https://pkg.go.dev/github.com/json-iterator/go)
 [](https://travis-ci.org/json-iterator/go)
 [](https://codecov.io/gh/json-iterator/go)
 [](https://goreportcard.com/report/github.com/json-iterator/go)
@@ -19,7 +19,7 @@ Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/githu
 Raw Result (easyjson requires static code generation)
 
 | | ns/op | allocation bytes | allocation times |
-| --- | --- | --- | --- |
+| --------------- | ----------- | ---------------- | ---------------- |
 | std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
 | easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
 | jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
@@ -44,7 +44,7 @@ json.Marshal(&data)
 with
 
 ```go
-import "github.com/json-iterator/go"
+import jsoniter "github.com/json-iterator/go"
 
 var json = jsoniter.ConfigCompatibleWithStandardLibrary
 json.Marshal(&data)
@@ -60,7 +60,7 @@ json.Unmarshal(input, &data)
 with
 
 ```go
-import "github.com/json-iterator/go"
+import jsoniter "github.com/json-iterator/go"
 
 var json = jsoniter.ConfigCompatibleWithStandardLibrary
 json.Unmarshal(input, &data)
@@ -78,10 +78,10 @@ go get github.com/json-iterator/go
 
 Contributors
 
-* [thockin](https://github.com/thockin)
-* [mattn](https://github.com/mattn)
-* [cch123](https://github.com/cch123)
-* [Oleg Shaldybin](https://github.com/olegshaldybin)
-* [Jason Toffaletti](https://github.com/toffaletti)
+- [thockin](https://github.com/thockin)
+- [mattn](https://github.com/mattn)
+- [cch123](https://github.com/cch123)
+- [Oleg Shaldybin](https://github.com/olegshaldybin)
+- [Jason Toffaletti](https://github.com/toffaletti)
 
 Report issue or pull request, or email taowen@gmail.com, or [](https://gitter.im/json-iterator/Lobby)
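The README hunk above keeps documenting jsoniter as a drop-in replacement for encoding/json via `ConfigCompatibleWithStandardLibrary`. A minimal, self-contained sketch of that usage, assuming the vendored jsoniter behaves like upstream; the `user` type and its fields are illustrative, not from the diff:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// json mirrors the README's drop-in pattern: the package-level variable
// stands in for the usual encoding/json import.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

type user struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	data := user{Name: "ada", Age: 36}

	out, err := json.Marshal(&data)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	var decoded user
	if err := json.Unmarshal(out, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Name, decoded.Age)
}
```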
4 vendor/github.com/json-iterator/go/any_str.go generated vendored
@@ -64,7 +64,6 @@ func (any *stringAny) ToInt64() int64 {
 
 	flag := 1
 	startPos := 0
-	endPos := 0
 	if any.val[0] == '+' || any.val[0] == '-' {
 		startPos = 1
 	}
@@ -73,6 +72,7 @@ func (any *stringAny) ToInt64() int64 {
 		flag = -1
 	}
 
+	endPos := startPos
 	for i := startPos; i < len(any.val); i++ {
 		if any.val[i] >= '0' && any.val[i] <= '9' {
 			endPos = i + 1
@@ -98,7 +98,6 @@ func (any *stringAny) ToUint64() uint64 {
 	}
 
 	startPos := 0
-	endPos := 0
 
 	if any.val[0] == '-' {
 		return 0
@@ -107,6 +106,7 @@ func (any *stringAny) ToUint64() uint64 {
 		startPos = 1
 	}
 
+	endPos := startPos
 	for i := startPos; i < len(any.val); i++ {
 		if any.val[i] >= '0' && any.val[i] <= '9' {
 			endPos = i + 1
4 vendor/github.com/json-iterator/go/config.go generated vendored
@@ -183,11 +183,11 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
 	encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
 		rawMessage := *(*json.RawMessage)(ptr)
 		iter := cfg.BorrowIterator([]byte(rawMessage))
+		defer cfg.ReturnIterator(iter)
 		iter.Read()
-		if iter.Error != nil {
+		if iter.Error != nil && iter.Error != io.EOF {
 			stream.WriteRaw("null")
 		} else {
-			cfg.ReturnIterator(iter)
 			stream.WriteRaw(string(rawMessage))
 		}
 	}, func(ptr unsafe.Pointer) bool {
4 vendor/github.com/json-iterator/go/iter_object.go generated vendored
@@ -150,7 +150,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
 		if c == '}' {
 			return iter.decrementDepth()
 		}
-		iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
+		iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
 		iter.decrementDepth()
 		return false
 	}
@@ -206,7 +206,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
 		if c == '}' {
 			return iter.decrementDepth()
 		}
-		iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+		iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
 		iter.decrementDepth()
 		return false
 	}
4 vendor/github.com/json-iterator/go/reflect_extension.go generated vendored
@@ -341,7 +341,7 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
 		if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
 			continue
 		}
-		if tag == "-" {
+		if tag == "-" || field.Name() == "_" {
 			continue
 		}
 		tagParts := strings.Split(tag, ",")
@@ -475,7 +475,7 @@ func calcFieldNames(originalFieldName string, tagProvidedFieldName string, whole
 		fieldNames = []string{tagProvidedFieldName}
 	}
 	// private?
-	isNotExported := unicode.IsLower(rune(originalFieldName[0]))
+	isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_'
 	if isNotExported {
 		fieldNames = []string{}
 	}
68 vendor/github.com/json-iterator/go/reflect_map.go generated vendored
@@ -49,20 +49,7 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
 			return decoder
 		}
 	}
-	switch typ.Kind() {
-	case reflect.String:
-		return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
-	case reflect.Bool,
-		reflect.Uint8, reflect.Int8,
-		reflect.Uint16, reflect.Int16,
-		reflect.Uint32, reflect.Int32,
-		reflect.Uint64, reflect.Int64,
-		reflect.Uint, reflect.Int,
-		reflect.Float32, reflect.Float64,
-		reflect.Uintptr:
-		typ = reflect2.DefaultTypeOfKind(typ.Kind())
-		return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
-	default:
 	ptrType := reflect2.PtrTo(typ)
 	if ptrType.Implements(unmarshalerType) {
 		return &referenceDecoder{
@@ -88,6 +75,21 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
 			valType: typ,
 		}
 	}
+
+	switch typ.Kind() {
+	case reflect.String:
+		return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+	case reflect.Bool,
+		reflect.Uint8, reflect.Int8,
+		reflect.Uint16, reflect.Int16,
+		reflect.Uint32, reflect.Int32,
+		reflect.Uint64, reflect.Int64,
+		reflect.Uint, reflect.Int,
+		reflect.Float32, reflect.Float64,
+		reflect.Uintptr:
+		typ = reflect2.DefaultTypeOfKind(typ.Kind())
+		return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
+	default:
 		return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
 	}
 }
@@ -103,6 +105,19 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
 			return encoder
 		}
 	}
+
+	if typ == textMarshalerType {
+		return &directTextMarshalerEncoder{
+			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+		}
+	}
+	if typ.Implements(textMarshalerType) {
+		return &textMarshalerEncoder{
+			valType:       typ,
+			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+		}
+	}
+
 	switch typ.Kind() {
 	case reflect.String:
 		return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
@@ -117,17 +132,6 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
 		typ = reflect2.DefaultTypeOfKind(typ.Kind())
 		return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
 	default:
-		if typ == textMarshalerType {
-			return &directTextMarshalerEncoder{
-				stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
-			}
-		}
-		if typ.Implements(textMarshalerType) {
-			return &textMarshalerEncoder{
-				valType:       typ,
-				stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
-			}
-		}
 		if typ.Kind() == reflect.Interface {
 			return &dynamicMapKeyEncoder{ctx, typ}
 		}
@@ -163,10 +167,6 @@ func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
 	if c == '}' {
 		return
 	}
-	if c != '"' {
-		iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
-		return
-	}
 	iter.unreadByte()
 	key := decoder.keyType.UnsafeNew()
 	decoder.keyDecoder.Decode(key, iter)
@@ -290,16 +290,17 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
 	stream.WriteObjectStart()
 	mapIter := encoder.mapType.UnsafeIterate(ptr)
 	subStream := stream.cfg.BorrowStream(nil)
+	subStream.Attachment = stream.Attachment
 	subIter := stream.cfg.BorrowIterator(nil)
 	keyValues := encodedKeyValues{}
 	for mapIter.HasNext() {
-		subStream.buf = make([]byte, 0, 64)
 		key, elem := mapIter.UnsafeNext()
+		subStreamIndex := subStream.Buffered()
 		encoder.keyEncoder.Encode(key, subStream)
 		if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
 			stream.Error = subStream.Error
 		}
-		encodedKey := subStream.Buffer()
+		encodedKey := subStream.Buffer()[subStreamIndex:]
 		subIter.ResetBytes(encodedKey)
 		decodedKey := subIter.ReadString()
 		if stream.indention > 0 {
@@ -310,7 +311,7 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
 		encoder.elemEncoder.Encode(elem, subStream)
 		keyValues = append(keyValues, encodedKV{
 			key:      decodedKey,
-			keyValue: subStream.Buffer(),
+			keyValue: subStream.Buffer()[subStreamIndex:],
 		})
 	}
 	sort.Sort(keyValues)
@@ -320,6 +321,9 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
 		}
 		stream.Write(keyValue.keyValue)
 	}
+	if subStream.Error != nil && stream.Error == nil {
+		stream.Error = subStream.Error
+	}
 	stream.WriteObjectEnd()
 	stream.cfg.ReturnStream(subStream)
 	stream.cfg.ReturnIterator(subIter)
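The encoderOfMapKey change above moves the `encoding.TextMarshaler` checks ahead of the kind switch, so a named key type whose underlying kind is string or numeric now has its MarshalText output used as the object key, matching encoding/json. A sketch of the kind of key type the reordered check affects; the `Key` type and the expected output are illustrative assumptions, not taken from the diff:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Key has kind string but also implements encoding.TextMarshaler.
// With the reordered check, MarshalText decides how the map key is written.
type Key string

func (k Key) MarshalText() ([]byte, error) {
	return []byte("key:" + string(k)), nil
}

func main() {
	m := map[Key]int{"a": 1}
	out, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected along the lines of {"key:a":1}
}
```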
4 vendor/github.com/json-iterator/go/reflect_optional.go generated vendored
@@ -2,7 +2,6 @@ package jsoniter
 
 import (
 	"github.com/modern-go/reflect2"
-	"reflect"
 	"unsafe"
 )
 
@@ -10,9 +9,6 @@ func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
 	ptrType := typ.(*reflect2.UnsafePtrType)
 	elemType := ptrType.Elem()
 	decoder := decoderOfType(ctx, elemType)
-	if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
-		return &dereferenceDecoder{elemType, decoder}
-	}
 	return &OptionalDecoder{elemType, decoder}
 }
 
22 vendor/github.com/json-iterator/go/reflect_struct_decoder.go generated vendored
@@ -507,7 +507,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
 	for c = ','; c == ','; c = iter.nextToken() {
 		decoder.decodeOneField(ptr, iter)
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	if c != '}' {
@@ -588,7 +588,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -622,7 +622,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -660,7 +660,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -702,7 +702,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -748,7 +748,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -798,7 +798,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -852,7 +852,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -910,7 +910,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -972,7 +972,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
@@ -1038,7 +1038,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
 			break
 		}
 	}
-	if iter.Error != nil && iter.Error != io.EOF {
+	if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
 		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
 	}
 	iter.decrementDepth()
1 vendor/github.com/json-iterator/go/reflect_struct_encoder.go generated vendored
@@ -200,6 +200,7 @@ type stringModeStringEncoder struct {
 
 func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
 	tempStream := encoder.cfg.BorrowStream(nil)
+	tempStream.Attachment = stream.Attachment
 	defer encoder.cfg.ReturnStream(tempStream)
 	encoder.elemEncoder.Encode(ptr, tempStream)
 	stream.WriteString(string(tempStream.Buffer()))
5 vendor/github.com/json-iterator/go/stream.go generated vendored
@@ -103,14 +103,14 @@ func (stream *Stream) Flush() error {
 	if stream.Error != nil {
 		return stream.Error
 	}
-	n, err := stream.out.Write(stream.buf)
+	_, err := stream.out.Write(stream.buf)
 	if err != nil {
 		if stream.Error == nil {
 			stream.Error = err
 		}
 		return err
 	}
-	stream.buf = stream.buf[n:]
+	stream.buf = stream.buf[:0]
 	return nil
 }
 
@@ -177,7 +177,6 @@ func (stream *Stream) WriteEmptyObject() {
 func (stream *Stream) WriteMore() {
 	stream.writeByte(',')
 	stream.writeIndention(0)
-	stream.Flush()
 }
 
 // WriteArrayStart write [ with possible indention
11 vendor/github.com/pkg/errors/.travis.yml generated vendored
@@ -1,15 +1,10 @@
 language: go
 go_import_path: github.com/pkg/errors
 go:
-  - 1.4.x
-  - 1.5.x
-  - 1.6.x
-  - 1.7.x
-  - 1.8.x
-  - 1.9.x
-  - 1.10.x
   - 1.11.x
+  - 1.12.x
+  - 1.13.x
   - tip
 
 script:
-  - go test -v ./...
+  - make check
44 vendor/github.com/pkg/errors/Makefile generated vendored Normal file
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+	$(GO) test $(PKGS)
+
+vet: | test
+	$(GO) vet $(PKGS)
+
+staticcheck:
+	$(GO) get honnef.co/go/tools/cmd/staticcheck
+	staticcheck -checks all $(PKGS)
+
+misspell:
+	$(GO) get github.com/client9/misspell/cmd/misspell
+	misspell \
+		-locale GB \
+		-error \
+		*.md *.go
+
+unconvert:
+	$(GO) get github.com/mdempsky/unconvert
+	unconvert -v $(PKGS)
+
+ineffassign:
+	$(GO) get github.com/gordonklaus/ineffassign
+	find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+	$(GO) get mvdan.cc/unparam
+	unparam ./...
+
+errcheck:
+	$(GO) get github.com/kisielk/errcheck
+	errcheck $(PKGS)
+
+gofmt:
+	@echo Checking code is gofmted
+	@test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
11 vendor/github.com/pkg/errors/README.md generated vendored
@@ -41,11 +41,18 @@ default:
 
 [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
 
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
 ## Contributing
 
-We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
 
-Before proposing a change, please discuss your change by raising an issue.
+Before sending a PR, please discuss your change by raising an issue.
 
 ## License
 
8 vendor/github.com/pkg/errors/errors.go generated vendored
@@ -82,7 +82,7 @@
 //
 //     if err, ok := err.(stackTracer); ok {
 //             for _, f := range err.StackTrace() {
-//                     fmt.Printf("%+s:%d", f)
+//                     fmt.Printf("%+s:%d\n", f, f)
 //             }
 //     }
 //
@@ -159,6 +159,9 @@ type withStack struct {
 
 func (w *withStack) Cause() error { return w.error }
 
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
 func (w *withStack) Format(s fmt.State, verb rune) {
 	switch verb {
 	case 'v':
@@ -241,6 +244,9 @@ type withMessage struct {
 func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
 func (w *withMessage) Cause() error  { return w.cause }
 
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
 func (w *withMessage) Format(s fmt.State, verb rune) {
 	switch verb {
 	case 'v':
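The two `Unwrap` methods added above are what let the standard library's error inspection walk through pkg/errors wrappers on Go 1.13 and newer. A small sketch under that assumption; the sentinel error and the message are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"io"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	// Wrap adds a message and a stack trace; because withStack and withMessage
	// now expose Unwrap, errors.Is can reach the original sentinel.
	err := pkgerrors.Wrap(io.ErrUnexpectedEOF, "reading header")

	fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true on Go 1.13+
}
```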
38 vendor/github.com/pkg/errors/go113.go generated vendored Normal file
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+	stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+	return stderrors.Unwrap(err)
+}
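The new go113.go forwards Is, As, and Unwrap to the standard errors package when built with Go 1.13 or newer, so callers can keep the pkg/errors import path. A hedged usage sketch; the "loading config" message and the *os.PathError lookup are illustrative:

```go
package main

import (
	"fmt"
	"os"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	_, err := os.Open("does-not-exist.conf")
	err = pkgerrors.WithMessage(err, "loading config")

	// On Go 1.13+ these helpers simply forward to the standard errors package.
	var pathErr *os.PathError
	if pkgerrors.As(err, &pathErr) {
		fmt.Println("failed path:", pathErr.Path)
	}
	fmt.Println(pkgerrors.Is(err, os.ErrNotExist))
}
```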
58 vendor/github.com/pkg/errors/stack.go generated vendored
@@ -5,10 +5,13 @@ import (
 	"io"
 	"path"
 	"runtime"
+	"strconv"
 	"strings"
 )
 
 // Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
 type Frame uintptr
 
 // pc returns the program counter for this frame;
@@ -37,6 +40,15 @@ func (f Frame) line() int {
 	return line
 }
 
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	return fn.Name()
+}
+
 // Format formats the frame according to the fmt.Formatter interface.
 //
 // %s source file
@@ -54,22 +66,16 @@ func (f Frame) Format(s fmt.State, verb rune) {
 	case 's':
 		switch {
 		case s.Flag('+'):
-			pc := f.pc()
-			fn := runtime.FuncForPC(pc)
-			if fn == nil {
-				io.WriteString(s, "unknown")
-			} else {
-				file, _ := fn.FileLine(pc)
-				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
-			}
+			io.WriteString(s, f.name())
+			io.WriteString(s, "\n\t")
+			io.WriteString(s, f.file())
 		default:
 			io.WriteString(s, path.Base(f.file()))
 		}
 	case 'd':
-		fmt.Fprintf(s, "%d", f.line())
+		io.WriteString(s, strconv.Itoa(f.line()))
 	case 'n':
-		name := runtime.FuncForPC(f.pc()).Name()
-		io.WriteString(s, funcname(name))
+		io.WriteString(s, funcname(f.name()))
 	case 'v':
 		f.Format(s, 's')
 		io.WriteString(s, ":")
@@ -77,6 +83,16 @@ func (f Frame) Format(s fmt.State, verb rune) {
 	}
 }
 
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+	name := f.name()
+	if name == "unknown" {
+		return []byte(name), nil
+	}
+	return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
 // StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
 type StackTrace []Frame
 
@@ -94,18 +110,32 @@ func (st StackTrace) Format(s fmt.State, verb rune) {
 		switch {
 		case s.Flag('+'):
 			for _, f := range st {
-				fmt.Fprintf(s, "\n%+v", f)
+				io.WriteString(s, "\n")
+				f.Format(s, verb)
 			}
 		case s.Flag('#'):
 			fmt.Fprintf(s, "%#v", []Frame(st))
 		default:
-			fmt.Fprintf(s, "%v", []Frame(st))
+			st.formatSlice(s, verb)
 		}
 	case 's':
-		fmt.Fprintf(s, "%s", []Frame(st))
+		st.formatSlice(s, verb)
 	}
 }
 
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+	io.WriteString(s, "[")
+	for i, f := range st {
+		if i > 0 {
+			io.WriteString(s, " ")
+		}
+		f.Format(s, verb)
	}
+	io.WriteString(s, "]")
+}
+
 // stack represents a stack of program counters.
 type stack []uintptr
 
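With the MarshalText addition above, each Frame implements encoding.TextMarshaler, so a captured stack trace can be serialized directly, for example into a JSON log field. A sketch; the `stackTracer` interface follows the pattern the package's own documentation describes:

```go
package main

import (
	"encoding/json"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

// stackTracer is the interface pkg/errors documents for errors carrying a stack.
type stackTracer interface {
	StackTrace() pkgerrors.StackTrace
}

func main() {
	err := pkgerrors.New("boom")
	if st, ok := err.(stackTracer); ok {
		// Each Frame marshals as "funcName file:line" via the new MarshalText,
		// so the whole trace becomes a JSON array of strings.
		out, _ := json.Marshal(st.StackTrace())
		fmt.Println(string(out))
	}
}
```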
50 vendor/github.com/prometheus/client_golang/prometheus/counter.go generated vendored
@@ -17,6 +17,7 @@ import (
 	"errors"
 	"math"
 	"sync/atomic"
+	"time"
 
 	dto "github.com/prometheus/client_model/go"
 )
@@ -42,11 +43,27 @@ type Counter interface {
 	Add(float64)
 }
 
+// ExemplarAdder is implemented by Counters that offer the option of adding a
+// value to the Counter together with an exemplar. Its AddWithExemplar method
+// works like the Add method of the Counter interface but also replaces the
+// currently saved exemplar (if any) with a new one, created from the provided
+// value, the current time as timestamp, and the provided labels. Empty Labels
+// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
+// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
+// of the provided labels are invalid, or if the provided labels contain more
+// than 64 runes in total.
+type ExemplarAdder interface {
+	AddWithExemplar(value float64, exemplar Labels)
+}
+
 // CounterOpts is an alias for Opts. See there for doc comments.
 type CounterOpts Opts
 
 // NewCounter creates a new Counter based on the provided CounterOpts.
 //
+// The returned implementation also implements ExemplarAdder. It is safe to
+// perform the corresponding type assertion.
+//
 // The returned implementation tracks the counter value in two separate
 // variables, a float64 and a uint64. The latter is used to track calls of the
 // Inc method and calls of the Add method with a value that can be represented
@@ -61,7 +78,7 @@ func NewCounter(opts CounterOpts) Counter {
 		nil,
 		opts.ConstLabels,
 	)
-	result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
 	result.init(result) // Init self-collection.
 	return result
 }
@@ -78,6 +95,9 @@ type counter struct {
 	desc *Desc
 
 	labelPairs []*dto.LabelPair
+	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.
+
+	now func() time.Time // To mock out time.Now() for testing.
 }
 
 func (c *counter) Desc() *Desc {
@@ -88,6 +108,7 @@ func (c *counter) Add(v float64) {
 	if v < 0 {
 		panic(errors.New("counter cannot decrease in value"))
 	}
+
 	ival := uint64(v)
 	if float64(ival) == v {
 		atomic.AddUint64(&c.valInt, ival)
@@ -103,6 +124,11 @@ func (c *counter) Add(v float64) {
 	}
 }
 
+func (c *counter) AddWithExemplar(v float64, e Labels) {
+	c.Add(v)
+	c.updateExemplar(v, e)
+}
+
 func (c *counter) Inc() {
 	atomic.AddUint64(&c.valInt, 1)
 }
@@ -112,7 +138,23 @@ func (c *counter) Write(out *dto.Metric) error {
 	ival := atomic.LoadUint64(&c.valInt)
 	val := fval + float64(ival)
 
-	return populateMetric(CounterValue, val, c.labelPairs, out)
+	var exemplar *dto.Exemplar
+	if e := c.exemplar.Load(); e != nil {
+		exemplar = e.(*dto.Exemplar)
+	}
+
+	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+}
+
+func (c *counter) updateExemplar(v float64, l Labels) {
+	if l == nil {
+		return
+	}
+	e, err := newExemplar(v, c.now(), l)
+	if err != nil {
+		panic(err)
+	}
+	c.exemplar.Store(e)
 }
 
 // CounterVec is a Collector that bundles a set of Counters that all share the
@@ -138,7 +180,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
 		if len(lvs) != len(desc.variableLabels) {
 			panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
 		}
-		result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+		result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
 		result.init(result) // Init self-collection.
 		return result
 	}),
@@ -267,6 +309,8 @@ type CounterFunc interface {
 // provided function must be concurrency-safe. The function should also honor
 // the contract for a Counter (values only go up, not down), but compliance will
 // not be checked.
+//
+// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
 func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
 	return newValueFunc(NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
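The ExemplarAdder interface and counter.AddWithExemplar added above let callers attach an exemplar, such as a trace ID, to a counter increment. A sketch assuming the usual client_golang registration flow; the metric name, label name, and trace ID are illustrative:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var requests = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "myapp_http_requests_total",
	Help: "Total HTTP requests handled.",
})

func observe(traceID string) {
	// Per the updated NewCounter doc comment, the concrete counter also
	// implements ExemplarAdder, so this type assertion is expected to succeed.
	if ea, ok := requests.(prometheus.ExemplarAdder); ok {
		ea.AddWithExemplar(1, prometheus.Labels{"trace_id": traceID})
		return
	}
	requests.Inc()
}

func main() {
	prometheus.MustRegister(requests)
	observe("4bf92f3577b34da6")
}
```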
22 vendor/github.com/prometheus/client_golang/prometheus/desc.go generated vendored
@@ -19,6 +19,8 @@ import (
 	"sort"
 	"strings"
 
+	"github.com/cespare/xxhash/v2"
+	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
 	"github.com/prometheus/common/model"
 
@@ -126,24 +128,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 		return d
 	}
 
-	vh := hashNew()
+	xxh := xxhash.New()
 	for _, val := range labelValues {
-		vh = hashAdd(vh, val)
-		vh = hashAddByte(vh, separatorByte)
+		xxh.WriteString(val)
+		xxh.Write(separatorByteSlice)
 	}
-	d.id = vh
+	d.id = xxh.Sum64()
 	// Sort labelNames so that order doesn't matter for the hash.
 	sort.Strings(labelNames)
 	// Now hash together (in this order) the help string and the sorted
 	// label names.
-	lh := hashNew()
-	lh = hashAdd(lh, help)
-	lh = hashAddByte(lh, separatorByte)
+	xxh.Reset()
+	xxh.WriteString(help)
+	xxh.Write(separatorByteSlice)
 	for _, labelName := range labelNames {
-		lh = hashAdd(lh, labelName)
-		lh = hashAddByte(lh, separatorByte)
+		xxh.WriteString(labelName)
+		xxh.Write(separatorByteSlice)
 	}
-	d.dimHash = lh
+	d.dimHash = xxh.Sum64()
 
 	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
 	for n, v := range constLabels {
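NewDesc now hashes label values and names with XXH64 from github.com/cespare/xxhash/v2 instead of the previous FNV-style helpers. A sketch of the same hashing pattern as a standalone program; the 0xff separator is an assumption standing in for the package's unexported separatorByteSlice, and the label names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	sep := []byte{0xff} // assumption: stand-in for the unexported separator byte

	// Hash each string followed by a separator, then read the 64-bit digest,
	// mirroring how NewDesc builds its dimension hash.
	h := xxhash.New()
	for _, v := range []string{"method", "code", "handler"} {
		h.WriteString(v)
		h.Write(sep)
	}
	fmt.Printf("dimension hash: %x\n", h.Sum64())
}
```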
37
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
37
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
@@ -84,25 +84,21 @@
|
|||||||
// of those four metric types can be found in the Prometheus docs:
|
// of those four metric types can be found in the Prometheus docs:
|
||||||
// https://prometheus.io/docs/concepts/metric_types/
|
// https://prometheus.io/docs/concepts/metric_types/
|
||||||
//
|
//
|
||||||
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
|
// In addition to the fundamental metric types Gauge, Counter, Summary, and
|
||||||
// Prometheus server not to assume anything about its type.
|
// Histogram, a very important part of the Prometheus data model is the
|
||||||
//
|
-// In addition to the fundamental metric types Gauge, Counter, Summary,
-// Histogram, and Untyped, a very important part of the Prometheus data model is
-// the partitioning of samples along dimensions called labels, which results in
+// partitioning of samples along dimensions called labels, which results in
 // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
-// HistogramVec, and UntypedVec.
+// and HistogramVec.
 //
 // While only the fundamental metric types implement the Metric interface, both
 // the metrics and their vector versions implement the Collector interface. A
 // Collector manages the collection of a number of Metrics, but for convenience,
-// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
-// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
-// SummaryVec, HistogramVec, and UntypedVec are not.
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
+// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec are not.
 //
 // To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
-// UntypedOpts.
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
 //
 // Custom Collectors and constant Metrics
 //
@@ -118,13 +114,16 @@
 // existing numbers into Prometheus Metrics during collection. An own
 // implementation of the Collector interface is perfect for that. You can create
 // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
-// NewConstSummary (and their respective Must… versions). That will happen in
-// the Collect method. The Describe method has to return separate Desc
-// instances, representative of the “throw-away” metrics to be created later.
-// NewDesc comes in handy to create those Desc instances. Alternatively, you
-// could return no Desc at all, which will mark the Collector “unchecked”. No
-// checks are performed at registration time, but metric consistency will still
-// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// NewConstSummary (and their respective Must… versions). NewConstMetric is used
+// for all metric types with just a float64 as their value: Counter, Gauge, and
+// a special “type” called Untyped. Use the latter if you are not sure if the
+// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
+// happens in the Collect method. The Describe method has to return separate
+// Desc instances, representative of the “throw-away” metrics to be created
+// later. NewDesc comes in handy to create those Desc instances. Alternatively,
+// you could return no Desc at all, which will mark the Collector “unchecked”.
+// No checks are performed at registration time, but metric consistency will
+// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
 // errors. Thus, with unchecked Collectors, the responsibility to not collect
 // metrics that lead to inconsistencies in the total scrape result lies with the
 // implementer of the Collector. While this is not a desirable state, it is
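The package-doc hunk above describes building a custom Collector that mints constant Metrics during collection. The following is a minimal sketch of that pattern, not part of the diff; the metric name, help text, and the queueLength helper are invented for illustration, and only the prometheus API calls come from the library.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors an externally maintained number into a Prometheus
// metric at scrape time, as described in the doc comment above.
type queueCollector struct {
    lengthDesc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
    return &queueCollector{
        lengthDesc: prometheus.NewDesc(
            "example_queue_length", // hypothetical metric name
            "Current length of the example queue.",
            nil, nil,
        ),
    }
}

// Describe returns the throw-away Desc. Returning nothing instead would make
// the Collector "unchecked".
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
    ch <- c.lengthDesc
}

// Collect creates the Metric on the fly with the Must… variant of NewConstMetric.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
    ch <- prometheus.MustNewConstMetric(c.lengthDesc, prometheus.GaugeValue, queueLength())
}

// queueLength stands in for whatever existing number is being mirrored.
func queueLength() float64 { return 42 }

func main() {
    prometheus.MustRegister(newQueueCollector())
}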
11 vendor/github.com/prometheus/client_golang/prometheus/gauge.go generated vendored
@@ -123,7 +123,7 @@ func (g *gauge) Sub(val float64) {
 
 func (g *gauge) Write(out *dto.Metric) error {
     val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
-    return populateMetric(GaugeValue, val, g.labelPairs, out)
+    return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
 }
 
 // GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -273,9 +273,12 @@ type GaugeFunc interface {
 // NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
 // value reported is determined by calling the given function from within the
 // Write method. Take into account that metric collection may happen
-// concurrently. If that results in concurrent calls to Write, like in the case
-// where a GaugeFunc is directly registered with Prometheus, the provided
-// function must be concurrency-safe.
+// concurrently. Therefore, it must be safe to call the provided function
+// concurrently.
+//
+// NewGaugeFunc is a good way to create an “info” style metric with a constant
+// value of 1. Example:
+// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
 func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
     return newValueFunc(NewDesc(
         BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
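The new NewGaugeFunc comment above points at the “info” pattern: a gauge with a constant value of 1 carrying informational const labels. A small, hedged sketch of that pattern follows; the metric name and label values are made up, only GaugeOpts and NewGaugeFunc come from the library.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    buildInfo := prometheus.NewGaugeFunc(
        prometheus.GaugeOpts{
            Name: "example_build_info", // hypothetical metric name
            Help: "Build information. The value is always 1.",
            ConstLabels: prometheus.Labels{
                "version": "1.2.3", // illustrative constant label
            },
        },
        func() float64 { return 1 }, // constant value, safe to call concurrently
    )
    prometheus.MustRegister(buildInfo)
}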
2 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go generated vendored
@@ -73,7 +73,7 @@ func NewGoCollector() Collector {
         nil, nil),
     gcDesc: NewDesc(
         "go_gc_duration_seconds",
-        "A summary of the GC invocation durations.",
+        "A summary of the pause duration of garbage collection cycles.",
         nil, nil),
     goInfoDesc: NewDesc(
         "go_info",
117 vendor/github.com/prometheus/client_golang/prometheus/histogram.go generated vendored
@@ -20,7 +20,9 @@ import (
     "sort"
     "sync"
     "sync/atomic"
+    "time"
 
+    //lint:ignore SA1019 Need to keep deprecated package for compatibility.
     "github.com/golang/protobuf/proto"
 
     dto "github.com/prometheus/client_model/go"
@@ -138,7 +140,7 @@ type HistogramOpts struct {
     // better covered by target labels set by the scraping Prometheus
     // server, or by one specific metric (e.g. a build_info or a
     // machine_role metric). See also
-    // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+    // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
     ConstLabels Labels
 
     // Buckets defines the buckets into which observations are counted. Each
@@ -151,6 +153,10 @@ type HistogramOpts struct {
 
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
 // panics if the buckets in HistogramOpts are not in strictly increasing order.
+//
+// The returned implementation also implements ExemplarObserver. It is safe to
+// perform the corresponding type assertion. Exemplars are tracked separately
+// for each bucket.
 func NewHistogram(opts HistogramOpts) Histogram {
     return newHistogram(
         NewDesc(
@@ -187,7 +193,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
         desc: desc,
         upperBounds: opts.Buckets,
         labelPairs: makeLabelPairs(desc, labelValues),
-        counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
+        counts: [2]*histogramCounts{{}, {}},
+        now: time.Now,
     }
     for i, upperBound := range h.upperBounds {
         if i < len(h.upperBounds)-1 {
@@ -205,9 +212,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
         }
     }
     // Finally we know the final length of h.upperBounds and can make buckets
-    // for both counts:
+    // for both counts as well as exemplars:
     h.counts[0].buckets = make([]uint64, len(h.upperBounds))
     h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+    h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
 
     h.init(h) // Init self-collection.
     return h
@@ -254,6 +262,9 @@ type histogram struct {
 
     upperBounds []float64
     labelPairs []*dto.LabelPair
+    exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+
+    now func() time.Time // To mock out time.Now() for testing.
 }
 
 func (h *histogram) Desc() *Desc {
@@ -261,36 +272,13 @@ func (h *histogram) Desc() *Desc {
 }
 
 func (h *histogram) Observe(v float64) {
-    // TODO(beorn7): For small numbers of buckets (<30), a linear search is
-    // slightly faster than the binary search. If we really care, we could
-    // switch from one search strategy to the other depending on the number
-    // of buckets.
-    //
-    // Microbenchmarks (BenchmarkHistogramNoLabels):
-    // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
-    // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
-    // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
-    i := sort.SearchFloat64s(h.upperBounds, v)
-
-    // We increment h.countAndHotIdx so that the counter in the lower
-    // 63 bits gets incremented. At the same time, we get the new value
-    // back, which we can use to find the currently-hot counts.
-    n := atomic.AddUint64(&h.countAndHotIdx, 1)
-    hotCounts := h.counts[n>>63]
-
-    if i < len(h.upperBounds) {
-        atomic.AddUint64(&hotCounts.buckets[i], 1)
-    }
-    for {
-        oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-        newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-        if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-            break
-        }
-    }
-    // Increment count last as we take it as a signal that the observation
-    // is complete.
-    atomic.AddUint64(&hotCounts.count, 1)
+    h.observe(v, h.findBucket(v))
+}
+
+func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
+    i := h.findBucket(v)
+    h.observe(v, i)
+    h.updateExemplar(v, i, e)
 }
 
 func (h *histogram) Write(out *dto.Metric) error {
@@ -329,6 +317,18 @@ func (h *histogram) Write(out *dto.Metric) error {
             CumulativeCount: proto.Uint64(cumCount),
             UpperBound: proto.Float64(upperBound),
         }
+        if e := h.exemplars[i].Load(); e != nil {
+            his.Bucket[i].Exemplar = e.(*dto.Exemplar)
+        }
+    }
+    // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
+    if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
+        b := &dto.Bucket{
+            CumulativeCount: proto.Uint64(count),
+            UpperBound: proto.Float64(math.Inf(1)),
+            Exemplar: e.(*dto.Exemplar),
+        }
+        his.Bucket = append(his.Bucket, b)
     }
 
     out.Histogram = his
@@ -352,6 +352,57 @@ func (h *histogram) Write(out *dto.Metric) error {
     return nil
 }
 
+// findBucket returns the index of the bucket for the provided value, or
+// len(h.upperBounds) for the +Inf bucket.
+func (h *histogram) findBucket(v float64) int {
+    // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+    // slightly faster than the binary search. If we really care, we could
+    // switch from one search strategy to the other depending on the number
+    // of buckets.
+    //
+    // Microbenchmarks (BenchmarkHistogramNoLabels):
+    // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+    // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+    // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+    return sort.SearchFloat64s(h.upperBounds, v)
+}
+
+// observe is the implementation for Observe without the findBucket part.
+func (h *histogram) observe(v float64, bucket int) {
+    // We increment h.countAndHotIdx so that the counter in the lower
+    // 63 bits gets incremented. At the same time, we get the new value
+    // back, which we can use to find the currently-hot counts.
+    n := atomic.AddUint64(&h.countAndHotIdx, 1)
+    hotCounts := h.counts[n>>63]
+
+    if bucket < len(h.upperBounds) {
+        atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+    }
+    for {
+        oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+        newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+        if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+            break
+        }
+    }
+    // Increment count last as we take it as a signal that the observation
+    // is complete.
+    atomic.AddUint64(&hotCounts.count, 1)
+}
+
+// updateExemplar replaces the exemplar for the provided bucket. With empty
+// labels, it's a no-op. It panics if any of the labels is invalid.
+func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
+    if l == nil {
+        return
+    }
+    e, err := newExemplar(v, h.now(), l)
+    if err != nil {
+        panic(err)
+    }
+    h.exemplars[bucket].Store(e)
+}
+
 // HistogramVec is a Collector that bundles a set of Histograms that all share the
 // same Desc, but have different values for their variable labels. This is used
 // if you want to count the same thing partitioned by various dimensions
@@ -556,7 +607,7 @@ func NewConstHistogram(
 }
 
 // MustNewConstHistogram is a version of NewConstHistogram that panics where
-// NewConstMetric would have returned an error.
+// NewConstHistogram would have returned an error.
 func MustNewConstHistogram(
     desc *Desc,
     count uint64,
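Per the updated NewHistogram doc comment above, the returned Histogram also implements ExemplarObserver, so the type assertion in the sketch below is expected to succeed. The metric name, observed value, and trace_id label are illustrative; note that exemplars only reach a scraper via the OpenMetrics exposition format.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    requestDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name:    "example_request_duration_seconds", // hypothetical metric name
        Help:    "Request duration with per-bucket exemplars.",
        Buckets: prometheus.DefBuckets,
    })
    prometheus.MustRegister(requestDuration)

    // Attach an exemplar to this observation; the ok-check keeps the sketch
    // defensive even though the assertion is documented to be safe here.
    if eo, ok := requestDuration.(prometheus.ExemplarObserver); ok {
        eo.ObserveWithExemplar(0.042, prometheus.Labels{"trace_id": "abc123"})
    }
}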
4 vendor/github.com/prometheus/client_golang/prometheus/metric.go generated vendored
@@ -17,12 +17,14 @@ import (
     "strings"
     "time"
 
+    //lint:ignore SA1019 Need to keep deprecated package for compatibility.
     "github.com/golang/protobuf/proto"
+    "github.com/prometheus/common/model"
 
     dto "github.com/prometheus/client_model/go"
 )
 
-const separatorByte byte = 255
+var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
 
 // A Metric models a single sample value with its meta data being exported to
 // Prometheus. Implementations of Metric in this package are Gauge, Counter,
12 vendor/github.com/prometheus/client_golang/prometheus/observer.go generated vendored
@@ -50,3 +50,15 @@ type ObserverVec interface {
 
     Collector
 }
+
+// ExemplarObserver is implemented by Observers that offer the option of
+// observing a value together with an exemplar. Its ObserveWithExemplar method
+// works like the Observe method of an Observer but also replaces the currently
+// saved exemplar (if any) with a new one, created from the provided value, the
+// current time as timestamp, and the provided Labels. Empty Labels will lead to
+// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
+// left in place. ObserveWithExemplar panics if any of the provided labels are
+// invalid or if the provided labels contain more than 64 runes in total.
+type ExemplarObserver interface {
+    ObserveWithExemplar(value float64, exemplar Labels)
+}
24 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go generated vendored
@@ -33,18 +33,22 @@ var (
 )
 
 type processMemoryCounters struct {
-    // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
+    // System interface description
+    // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
+
+    // Refer to the Golang internal implementation
+    // https://golang.org/src/internal/syscall/windows/psapi_windows.go
     _ uint32
     PageFaultCount uint32
-    PeakWorkingSetSize uint64
-    WorkingSetSize uint64
-    QuotaPeakPagedPoolUsage uint64
-    QuotaPagedPoolUsage uint64
-    QuotaPeakNonPagedPoolUsage uint64
-    QuotaNonPagedPoolUsage uint64
-    PagefileUsage uint64
-    PeakPagefileUsage uint64
-    PrivateUsage uint64
+    PeakWorkingSetSize uintptr
+    WorkingSetSize uintptr
+    QuotaPeakPagedPoolUsage uintptr
+    QuotaPagedPoolUsage uintptr
+    QuotaPeakNonPagedPoolUsage uintptr
+    QuotaNonPagedPoolUsage uintptr
+    PagefileUsage uintptr
+    PeakPagefileUsage uintptr
+    PrivateUsage uintptr
 }
 
 func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
19 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go generated vendored
@@ -53,15 +53,21 @@ func (r *responseWriterDelegator) Written() int64 {
 }
 
 func (r *responseWriterDelegator) WriteHeader(code int) {
+    if r.observeWriteHeader != nil && !r.wroteHeader {
+        // Only call observeWriteHeader for the 1st time. It's a bug if
+        // WriteHeader is called more than once, but we want to protect
+        // against it here. Note that we still delegate the WriteHeader
+        // to the original ResponseWriter to not mask the bug from it.
+        r.observeWriteHeader(code)
+    }
     r.status = code
     r.wroteHeader = true
    r.ResponseWriter.WriteHeader(code)
-    if r.observeWriteHeader != nil {
-        r.observeWriteHeader(code)
-    }
 }
 
 func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+    // If applicable, call WriteHeader here so that observeWriteHeader is
+    // handled appropriately.
     if !r.wroteHeader {
         r.WriteHeader(http.StatusOK)
     }
@@ -82,12 +88,19 @@ func (d closeNotifierDelegator) CloseNotify() <-chan bool {
     return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
 }
 func (d flusherDelegator) Flush() {
+    // If applicable, call WriteHeader here so that observeWriteHeader is
+    // handled appropriately.
+    if !d.wroteHeader {
+        d.WriteHeader(http.StatusOK)
+    }
     d.ResponseWriter.(http.Flusher).Flush()
 }
 func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
     return d.ResponseWriter.(http.Hijacker).Hijack()
 }
 func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+    // If applicable, call WriteHeader here so that observeWriteHeader is
+    // handled appropriately.
     if !d.wroteHeader {
         d.WriteHeader(http.StatusOK)
     }
64 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go generated vendored
@@ -144,7 +144,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
             }
         }
 
-        contentType := expfmt.Negotiate(req.Header)
+        var contentType expfmt.Format
+        if opts.EnableOpenMetrics {
+            contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
+        } else {
+            contentType = expfmt.Negotiate(req.Header)
+        }
         header := rsp.Header()
         header.Set(contentTypeHeader, string(contentType))
 
@@ -162,10 +167,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 
         enc := expfmt.NewEncoder(w, contentType)
 
-        var lastErr error
-        for _, mf := range mfs {
-            if err := enc.Encode(mf); err != nil {
-                lastErr = err
+        // handleError handles the error according to opts.ErrorHandling
+        // and returns true if we have to abort after the handling.
+        handleError := func(err error) bool {
+            if err == nil {
+                return false
+            }
             if opts.ErrorLog != nil {
                 opts.ErrorLog.Println("error encoding and sending metric family:", err)
             }
@@ -173,17 +180,27 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
             switch opts.ErrorHandling {
             case PanicOnError:
                 panic(err)
-            case ContinueOnError:
-                // Handled later.
-            case HTTPErrorOnError:
-                httpError(rsp, err)
-                return
-            }
-        }
-
-        if lastErr != nil {
-            httpError(rsp, lastErr)
-        }
+            case HTTPErrorOnError:
+                // We cannot really send an HTTP error at this
+                // point because we most likely have written
+                // something to rsp already. But at least we can
+                // stop sending.
+                return true
+            }
+            // Do nothing in all other cases, including ContinueOnError.
+            return false
+        }
+
+        for _, mf := range mfs {
+            if handleError(enc.Encode(mf)) {
+                return
+            }
+        }
+        if closer, ok := enc.(expfmt.Closer); ok {
+            // This in particular takes care of the final "# EOF\n" line for OpenMetrics.
+            if handleError(closer.Close()) {
+                return
+            }
+        }
     })
 
@@ -255,7 +272,12 @@ type HandlerErrorHandling int
 // errors are encountered.
 const (
     // Serve an HTTP status code 500 upon the first error
-    // encountered. Report the error message in the body.
+    // encountered. Report the error message in the body. Note that HTTP
+    // errors cannot be served anymore once the beginning of a regular
+    // payload has been sent. Thus, in the (unlikely) case that encoding the
+    // payload into the negotiated wire format fails, serving the response
+    // will simply be aborted. Set an ErrorLog in HandlerOpts to detect
+    // those errors.
     HTTPErrorOnError HandlerErrorHandling = iota
     // Ignore errors and try to serve as many metrics as possible. However,
     // if no metrics can be served, serve an HTTP status code 500 and the
@@ -318,6 +340,16 @@ type HandlerOpts struct {
     // away). Until the implementation is improved, it is recommended to
     // implement a separate timeout in potentially slow Collectors.
     Timeout time.Duration
+    // If true, the experimental OpenMetrics encoding is added to the
+    // possible options during content negotiation. Note that Prometheus
+    // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
+    // the only way to transmit exemplars. However, the move to OpenMetrics
+    // is not completely transparent. Most notably, the values of "quantile"
+    // labels of Summaries and "le" labels of Histograms are formatted with
+    // a trailing ".0" if they would otherwise look like integer numbers
+    // (which changes the identity of the resulting series on the Prometheus
+    // server).
+    EnableOpenMetrics bool
 }
 
 // gzipAccepted returns whether the client will accept gzip-encoded content.
@@ -334,11 +366,9 @@ func gzipAccepted(header http.Header) bool {
 }
 
 // httpError removes any content-encoding header and then calls http.Error with
-// the provided error and http.StatusInternalServerErrer. Error contents is
-// supposed to be uncompressed plain text. However, same as with a plain
-// http.Error, any header settings will be void if the header has already been
-// sent. The error message will still be written to the writer, but it will
-// probably be of limited use.
+// the provided error and http.StatusInternalServerError. Error contents is
+// supposed to be uncompressed plain text. Same as with a plain http.Error, this
+// must not be called if the header or any payload has already been sent.
 func httpError(rsp http.ResponseWriter, err error) {
     rsp.Header().Del(contentEncodingHeader)
     http.Error(
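The new EnableOpenMetrics field above is what lets HandlerFor negotiate the OpenMetrics format (and therefore expose exemplars). A hedged sketch of a metrics endpoint using it; the listen address and route are arbitrary choices for illustration.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    http.Handle("/metrics", promhttp.HandlerFor(
        prometheus.DefaultGatherer,
        promhttp.HandlerOpts{
            // Opt in to OpenMetrics during content negotiation; this is
            // what makes exemplars visible to a scraping Prometheus.
            EnableOpenMetrics: true,
        },
    ))
    log.Fatal(http.ListenAndServe(":8080", nil))
}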
33 vendor/github.com/prometheus/client_golang/prometheus/registry.go generated vendored
@@ -25,6 +25,8 @@ import (
     "sync"
     "unicode/utf8"
 
+    "github.com/cespare/xxhash/v2"
+    //lint:ignore SA1019 Need to keep deprecated package for compatibility.
     "github.com/golang/protobuf/proto"
     "github.com/prometheus/common/expfmt"
 
@@ -74,7 +76,7 @@ func NewRegistry() *Registry {
 // NewPedanticRegistry returns a registry that checks during collection if each
 // collected Metric is consistent with its reported Desc, and if the Desc has
 // actually been registered with the registry. Unchecked Collectors (those whose
-// Describe methed does not yield any descriptors) are excluded from the check.
+// Describe method does not yield any descriptors) are excluded from the check.
 //
 // Usually, a Registry will be happy as long as the union of all collected
 // Metrics is consistent and valid even if some metrics are not consistent with
@@ -266,7 +268,7 @@ func (r *Registry) Register(c Collector) error {
         descChan = make(chan *Desc, capDescChan)
         newDescIDs = map[uint64]struct{}{}
         newDimHashesByName = map[string]uint64{}
-        collectorID uint64 // Just a sum of all desc IDs.
+        collectorID uint64 // All desc IDs XOR'd together.
         duplicateDescErr error
     )
     go func() {
@@ -293,12 +295,12 @@ func (r *Registry) Register(c Collector) error {
         if _, exists := r.descIDs[desc.id]; exists {
             duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
         }
-        // If it is not a duplicate desc in this collector, add it to
+        // If it is not a duplicate desc in this collector, XOR it to
         // the collectorID. (We allow duplicate descs within the same
         // collector, but their existence must be a no-op.)
         if _, exists := newDescIDs[desc.id]; !exists {
             newDescIDs[desc.id] = struct{}{}
-            collectorID += desc.id
+            collectorID ^= desc.id
         }
 
         // Are all the label names and the help string consistent with
@@ -360,7 +362,7 @@ func (r *Registry) Unregister(c Collector) bool {
     var (
         descChan = make(chan *Desc, capDescChan)
         descIDs = map[uint64]struct{}{}
-        collectorID uint64 // Just a sum of the desc IDs.
+        collectorID uint64 // All desc IDs XOR'd together.
     )
     go func() {
         c.Describe(descChan)
@@ -368,7 +370,7 @@ func (r *Registry) Unregister(c Collector) bool {
     }()
     for desc := range descChan {
         if _, exists := descIDs[desc.id]; !exists {
-            collectorID += desc.id
+            collectorID ^= desc.id
             descIDs[desc.id] = struct{}{}
         }
     }
@@ -875,9 +877,9 @@ func checkMetricConsistency(
     }
 
     // Is the metric unique (i.e. no other metric with the same name and the same labels)?
-    h := hashNew()
-    h = hashAdd(h, name)
-    h = hashAddByte(h, separatorByte)
+    h := xxhash.New()
+    h.WriteString(name)
+    h.Write(separatorByteSlice)
     // Make sure label pairs are sorted. We depend on it for the consistency
     // check.
     if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
@@ -888,18 +890,19 @@ func checkMetricConsistency(
         dtoMetric.Label = copiedLabels
     }
     for _, lp := range dtoMetric.Label {
-        h = hashAdd(h, lp.GetName())
-        h = hashAddByte(h, separatorByte)
-        h = hashAdd(h, lp.GetValue())
-        h = hashAddByte(h, separatorByte)
+        h.WriteString(lp.GetName())
+        h.Write(separatorByteSlice)
+        h.WriteString(lp.GetValue())
+        h.Write(separatorByteSlice)
     }
-    if _, exists := metricHashes[h]; exists {
+    hSum := h.Sum64()
+    if _, exists := metricHashes[hSum]; exists {
         return fmt.Errorf(
             "collected metric %q { %s} was collected before with the same name and label values",
             name, dtoMetric,
         )
     }
-    metricHashes[h] = struct{}{}
+    metricHashes[hSum] = struct{}{}
     return nil
 }
 
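checkMetricConsistency above now feeds the metric name and its sorted label pairs into a streaming xxhash Digest, separated by a sentinel byte, instead of the old hashAdd helpers. A standalone sketch of the same idea using only the xxhash/v2 API vendored in this commit; the hashSeries helper, the 0xff separator, and the example series are invented for illustration.

package main

import (
    "fmt"
    "sort"

    "github.com/cespare/xxhash/v2"
)

// hashSeries hashes a metric name plus its sorted label pairs, separating the
// parts with a sentinel byte so different splits cannot collide.
func hashSeries(name string, labels map[string]string) uint64 {
    sep := []byte{0xff} // illustrative separator, mirroring separatorByteSlice
    keys := make([]string, 0, len(labels))
    for k := range labels {
        keys = append(keys, k)
    }
    sort.Strings(keys)

    h := xxhash.New()
    h.WriteString(name)
    h.Write(sep)
    for _, k := range keys {
        h.WriteString(k)
        h.Write(sep)
        h.WriteString(labels[k])
        h.Write(sep)
    }
    return h.Sum64()
}

func main() {
    fmt.Println(hashSeries("http_requests_total", map[string]string{"code": "200"}))
}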
3 vendor/github.com/prometheus/client_golang/prometheus/summary.go generated vendored
@@ -23,6 +23,7 @@ import (
     "time"
 
     "github.com/beorn7/perks/quantile"
+    //lint:ignore SA1019 Need to keep deprecated package for compatibility.
     "github.com/golang/protobuf/proto"
 
     dto "github.com/prometheus/client_model/go"
@@ -208,7 +209,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
     s := &noObjectivesSummary{
         desc: desc,
         labelPairs: makeLabelPairs(desc, labelValues),
-        counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
+        counts: [2]*summaryCounts{{}, {}},
     }
     s.init(s) // Init self-collection.
     return s
51 vendor/github.com/prometheus/client_golang/prometheus/value.go generated vendored
@@ -16,8 +16,12 @@ package prometheus
 import (
     "fmt"
     "sort"
+    "time"
+    "unicode/utf8"
 
+    //lint:ignore SA1019 Need to keep deprecated package for compatibility.
     "github.com/golang/protobuf/proto"
+    "github.com/golang/protobuf/ptypes"
 
     dto "github.com/prometheus/client_model/go"
 )
@@ -25,7 +29,8 @@ import (
 // ValueType is an enumeration of metric types that represent a simple value.
 type ValueType int
 
-// Possible values for the ValueType enum.
+// Possible values for the ValueType enum. Use UntypedValue to mark a metric
+// with an unknown type.
 const (
     _ ValueType = iota
     CounterValue
@@ -69,7 +74,7 @@ func (v *valueFunc) Desc() *Desc {
 }
 
 func (v *valueFunc) Write(out *dto.Metric) error {
-    return populateMetric(v.valType, v.function(), v.labelPairs, out)
+    return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
 }
 
 // NewConstMetric returns a metric with one fixed value that cannot be
@@ -116,19 +121,20 @@ func (m *constMetric) Desc() *Desc {
 }
 
 func (m *constMetric) Write(out *dto.Metric) error {
-    return populateMetric(m.valType, m.val, m.labelPairs, out)
+    return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
 }
 
 func populateMetric(
     t ValueType,
     v float64,
     labelPairs []*dto.LabelPair,
+    e *dto.Exemplar,
     m *dto.Metric,
 ) error {
     m.Label = labelPairs
     switch t {
     case CounterValue:
-        m.Counter = &dto.Counter{Value: proto.Float64(v)}
+        m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
     case GaugeValue:
         m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
     case UntypedValue:
@@ -160,3 +166,40 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
     sort.Sort(labelPairSorter(labelPairs))
     return labelPairs
 }
+
+// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
+const ExemplarMaxRunes = 64
+
+// newExemplar creates a new dto.Exemplar from the provided values. An error is
+// returned if any of the label names or values are invalid or if the total
+// number of runes in the label names and values exceeds ExemplarMaxRunes.
+func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
+    e := &dto.Exemplar{}
+    e.Value = proto.Float64(value)
+    tsProto, err := ptypes.TimestampProto(ts)
+    if err != nil {
+        return nil, err
+    }
+    e.Timestamp = tsProto
+    labelPairs := make([]*dto.LabelPair, 0, len(l))
+    var runes int
+    for name, value := range l {
+        if !checkLabelName(name) {
+            return nil, fmt.Errorf("exemplar label name %q is invalid", name)
+        }
+        runes += utf8.RuneCountInString(name)
+        if !utf8.ValidString(value) {
+            return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
+        }
+        runes += utf8.RuneCountInString(value)
+        labelPairs = append(labelPairs, &dto.LabelPair{
+            Name: proto.String(name),
+            Value: proto.String(value),
+        })
+    }
+    if runes > ExemplarMaxRunes {
+        return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
+    }
+    e.Label = labelPairs
+    return e, nil
+}
14 vendor/github.com/prometheus/client_golang/prometheus/vec.go generated vendored
@@ -24,7 +24,7 @@ import (
 // their label values. metricVec is not used directly (and therefore
 // unexported). It is used as a building block for implementations of vectors of
 // a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
-// It also handles label currying. It uses basicMetricVec internally.
+// It also handles label currying.
 type metricVec struct {
     *metricMap
 
@@ -91,6 +91,18 @@ func (m *metricVec) Delete(labels Labels) bool {
     return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
 }
 
+// Without explicit forwarding of Describe, Collect, Reset, those methods won't
+// show up in GoDoc.
+
+// Describe implements Collector.
+func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
+
+// Collect implements Collector.
+func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
+
+// Reset deletes all metrics in this vector.
+func (m *metricVec) Reset() { m.metricMap.Reset() }
+
 func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
     var (
         newCurry []curriedLabelValue
14 vendor/github.com/prometheus/client_golang/prometheus/wrap.go generated vendored
@@ -17,6 +17,7 @@ import (
     "fmt"
     "sort"
 
+    //lint:ignore SA1019 Need to keep deprecated package for compatibility.
     "github.com/golang/protobuf/proto"
 
     dto "github.com/prometheus/client_model/go"
@@ -27,7 +28,8 @@ import (
 // registered with the wrapped Registerer in a modified way. The modified
 // Collector adds the provided Labels to all Metrics it collects (as
 // ConstLabels). The Metrics collected by the unmodified Collector must not
-// duplicate any of those labels.
+// duplicate any of those labels. Wrapping a nil value is valid, resulting
+// in a no-op Registerer.
 //
 // WrapRegistererWith provides a way to add fixed labels to a subset of
 // Collectors. It should not be used to add fixed labels to all metrics exposed.
@@ -50,6 +52,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
 // Registerer. Collectors registered with the returned Registerer will be
 // registered with the wrapped Registerer in a modified way. The modified
 // Collector adds the provided prefix to the name of all Metrics it collects.
+// Wrapping a nil value is valid, resulting in a no-op Registerer.
 //
 // WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
 // a sub-system. To make this work, register metrics of the sub-system with the
@@ -80,6 +83,9 @@ type wrappingRegisterer struct {
 }
 
 func (r *wrappingRegisterer) Register(c Collector) error {
+    if r.wrappedRegisterer == nil {
+        return nil
+    }
     return r.wrappedRegisterer.Register(&wrappingCollector{
         wrappedCollector: c,
         prefix: r.prefix,
@@ -88,6 +94,9 @@ func (r *wrappingRegisterer) Register(c Collector) error {
 }
 
 func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+    if r.wrappedRegisterer == nil {
+        return
+    }
     for _, c := range cs {
         if err := r.Register(c); err != nil {
             panic(err)
@@ -96,6 +105,9 @@ func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
 }
 
 func (r *wrappingRegisterer) Unregister(c Collector) bool {
+    if r.wrappedRegisterer == nil {
+        return false
+    }
     return r.wrappedRegisterer.Unregister(&wrappingCollector{
         wrappedCollector: c,
         prefix: r.prefix,
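The wrap.go changes above make wrapping a nil Registerer a valid no-op and otherwise leave the wrapping behaviour as documented. A short sketch of the two wrappers combined; the registry, prefix, and label values are illustrative.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    reg := prometheus.NewRegistry()

    // Metrics registered through wrapped get the "example_" prefix and the
    // shard="a" const label; passing a nil Registerer here would simply turn
    // every Register/Unregister call into a no-op.
    wrapped := prometheus.WrapRegistererWithPrefix("example_",
        prometheus.WrapRegistererWith(prometheus.Labels{"shard": "a"}, reg))

    requests := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "requests_total", // exposed as example_requests_total{shard="a"}
        Help: "Handled requests.",
    })
    wrapped.MustRegister(requests)
    requests.Inc()
}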
112 vendor/github.com/prometheus/common/expfmt/encode.go generated vendored
@@ -30,17 +30,38 @@ type Encoder interface {
     Encode(*dto.MetricFamily) error
 }
 
-type encoder func(*dto.MetricFamily) error
-
-func (e encoder) Encode(v *dto.MetricFamily) error {
-    return e(v)
+// Closer is implemented by Encoders that need to be closed to finalize
+// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
+//
+// Note that all Encoder implementations returned from this package implement
+// Closer, too, even if the Close call is a no-op. This happens in preparation
+// for adding a Close method to the Encoder interface directly in a (mildly
+// breaking) release in the future.
+type Closer interface {
+    Close() error
 }
 
-// Negotiate returns the Content-Type based on the given Accept header.
-// If no appropriate accepted type is found, FmtText is returned.
+type encoderCloser struct {
+    encode func(*dto.MetricFamily) error
+    close func() error
+}
+
+func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
+    return ec.encode(v)
+}
+
+func (ec encoderCloser) Close() error {
+    return ec.close()
+}
+
+// Negotiate returns the Content-Type based on the given Accept header. If no
+// appropriate accepted type is found, FmtText is returned (which is the
+// Prometheus text format). This function will never negotiate FmtOpenMetrics,
+// as the support is still experimental. To include the option to negotiate
+// FmtOpenMetrics, use NegotiateOpenMetrics.
 func Negotiate(h http.Header) Format {
     for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+        ver := ac.Params["version"]
         if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
             switch ac.Params["encoding"] {
             case "delimited":
@@ -51,8 +72,6 @@ func Negotiate(h http.Header) Format {
                 return FmtProtoCompact
             }
         }
-        // Check for text format.
-        ver := ac.Params["version"]
         if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
             return FmtText
         }
@@ -60,29 +79,84 @@ func Negotiate(h http.Header) Format {
     return FmtText
 }
 
-// NewEncoder returns a new encoder based on content type negotiation.
+// NegotiateIncludingOpenMetrics works like Negotiate but includes
+// FmtOpenMetrics as an option for the result. Note that this function is
+// temporary and will disappear once FmtOpenMetrics is fully supported and as
+// such may be negotiated by the normal Negotiate function.
+func NegotiateIncludingOpenMetrics(h http.Header) Format {
+    for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+        ver := ac.Params["version"]
+        if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+            switch ac.Params["encoding"] {
+            case "delimited":
+                return FmtProtoDelim
+            case "text":
+                return FmtProtoText
+            case "compact-text":
+                return FmtProtoCompact
+            }
+        }
+        if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+            return FmtText
+        }
+        if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
+            return FmtOpenMetrics
+        }
+    }
+    return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation. All
+// Encoder implementations returned by NewEncoder also implement Closer, and
+// callers should always call the Close method. It is currently only required
+// for FmtOpenMetrics, but a future (breaking) release will add the Close method
+// to the Encoder interface directly. The current version of the Encoder
+// interface is kept for backwards compatibility.
 func NewEncoder(w io.Writer, format Format) Encoder {
     switch format {
     case FmtProtoDelim:
-        return encoder(func(v *dto.MetricFamily) error {
+        return encoderCloser{
+            encode: func(v *dto.MetricFamily) error {
                 _, err := pbutil.WriteDelimited(w, v)
                 return err
-        })
+            },
+            close: func() error { return nil },
+        }
     case FmtProtoCompact:
-        return encoder(func(v *dto.MetricFamily) error {
+        return encoderCloser{
+            encode: func(v *dto.MetricFamily) error {
                 _, err := fmt.Fprintln(w, v.String())
                 return err
-        })
+            },
+            close: func() error { return nil },
+        }
     case FmtProtoText:
-        return encoder(func(v *dto.MetricFamily) error {
+        return encoderCloser{
+            encode: func(v *dto.MetricFamily) error {
                 _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
                 return err
-        })
+            },
+            close: func() error { return nil },
+        }
     case FmtText:
-        return encoder(func(v *dto.MetricFamily) error {
+        return encoderCloser{
+            encode: func(v *dto.MetricFamily) error {
                 _, err := MetricFamilyToText(w, v)
                 return err
-        })
+            },
+            close: func() error { return nil },
+        }
+    case FmtOpenMetrics:
+        return encoderCloser{
+            encode: func(v *dto.MetricFamily) error {
+                _, err := MetricFamilyToOpenMetrics(w, v)
+                return err
+            },
+            close: func() error {
+                _, err := FinalizeOpenMetrics(w)
+                return err
+            },
+        }
     }
-    panic("expfmt.NewEncoder: unknown format")
+    panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
 }
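NewEncoder's updated contract asks callers to also call Close, which for FmtOpenMetrics writes the final "# EOF" line via FinalizeOpenMetrics. A hedged sketch of an encoding loop honouring that contract; the writeFamilies helper and its arguments are invented for illustration.

package main

import (
    "io"
    "net/http"
    "os"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

// writeFamilies negotiates a format (including OpenMetrics), encodes all
// metric families, and closes the encoder so a trailing "# EOF" is emitted
// when the OpenMetrics format was chosen.
func writeFamilies(w io.Writer, header http.Header, mfs []*dto.MetricFamily) error {
    format := expfmt.NegotiateIncludingOpenMetrics(header)
    enc := expfmt.NewEncoder(w, format)
    for _, mf := range mfs {
        if err := enc.Encode(mf); err != nil {
            return err
        }
    }
    // All encoders from this package also implement Closer; Close is a no-op
    // for the non-OpenMetrics formats.
    if closer, ok := enc.(expfmt.Closer); ok {
        return closer.Close()
    }
    return nil
}

func main() {
    _ = writeFamilies(os.Stdout, http.Header{}, nil)
}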
3 vendor/github.com/prometheus/common/expfmt/expfmt.go generated vendored
@@ -23,6 +23,8 @@ const (
     ProtoType = `application/vnd.google.protobuf`
     ProtoProtocol = `io.prometheus.client.MetricFamily`
     ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+    OpenMetricsType = `application/openmetrics-text`
+    OpenMetricsVersion = "0.0.1"
 
     // The Content-Type values for the different wire protocols.
     FmtUnknown Format = `<unknown>`
@@ -30,6 +32,7 @@ const (
     FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
     FmtProtoText Format = ProtoFmt + ` encoding=text`
     FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+    FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
 )
 
 const (
527
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
generated
vendored
Normal file
527
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
generated
vendored
Normal file
@@ -0,0 +1,527 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expfmt

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"math"
	"strconv"
	"strings"

	"github.com/golang/protobuf/ptypes"
	"github.com/prometheus/common/model"

	dto "github.com/prometheus/client_model/go"
)

// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
// the same order as the input, no further sorting is performed. Furthermore,
// this function assumes the input is already sanitized and does not perform any
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
// on individual metric families, it is the responsibility of the caller to
// append this line to 'out' once all metric families have been written.
// Conveniently, this can be done by calling FinalizeOpenMetrics.
//
// The output should be fully OpenMetrics compliant. However, there are a few
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
// - Counters are expected to have the `_total` suffix in their metric name. In
//   the output, the suffix will be truncated from the `# TYPE` and `# HELP`
//   line. A counter with a missing `_total` suffix is not an error. However,
//   its type will be set to `unknown` in that case to avoid invalid OpenMetrics
//   output.
//
// - No support for the following (optional) features: `# UNIT` line, `_created`
//   line, info type, stateset type, gaugehistogram type.
//
// - The size of exemplar labels is not checked (i.e. it's possible to create
//   exemplars that are larger than allowed by the OpenMetrics specification).
//
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
//   with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
	name := in.GetName()
	if name == "" {
		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
	}

	// Try the interface upgrade. If it doesn't work, we'll use a
	// bufio.Writer from the sync.Pool.
	w, ok := out.(enhancedWriter)
	if !ok {
		b := bufPool.Get().(*bufio.Writer)
		b.Reset(out)
		w = b
		defer func() {
			bErr := b.Flush()
			if err == nil {
				err = bErr
			}
			bufPool.Put(b)
		}()
	}

	var (
		n          int
		metricType = in.GetType()
		shortName  = name
	)
	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
		shortName = name[:len(name)-6]
	}

	// Comments, first HELP, then TYPE.
	if in.Help != nil {
		n, err = w.WriteString("# HELP ")
		written += n
		if err != nil {
			return
		}
		n, err = w.WriteString(shortName)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return
		}
		n, err = writeEscapedString(w, *in.Help, true)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte('\n')
		written++
		if err != nil {
			return
		}
	}
	n, err = w.WriteString("# TYPE ")
	written += n
	if err != nil {
		return
	}
	n, err = w.WriteString(shortName)
	written += n
	if err != nil {
		return
	}
	switch metricType {
	case dto.MetricType_COUNTER:
		if strings.HasSuffix(name, "_total") {
			n, err = w.WriteString(" counter\n")
		} else {
			n, err = w.WriteString(" unknown\n")
		}
	case dto.MetricType_GAUGE:
		n, err = w.WriteString(" gauge\n")
	case dto.MetricType_SUMMARY:
		n, err = w.WriteString(" summary\n")
	case dto.MetricType_UNTYPED:
		n, err = w.WriteString(" unknown\n")
	case dto.MetricType_HISTOGRAM:
		n, err = w.WriteString(" histogram\n")
	default:
		return written, fmt.Errorf("unknown metric type %s", metricType.String())
	}
	written += n
	if err != nil {
		return
	}

	// Finally the samples, one line for each.
	for _, metric := range in.Metric {
		switch metricType {
		case dto.MetricType_COUNTER:
			if metric.Counter == nil {
				return written, fmt.Errorf(
					"expected counter in metric %s %s", name, metric,
				)
			}
			// Note that we have ensured above that either the name
			// ends on `_total` or that the rendered type is
			// `unknown`. Therefore, no `_total` must be added here.
			n, err = writeOpenMetricsSample(
				w, name, "", metric, "", 0,
				metric.Counter.GetValue(), 0, false,
				metric.Counter.Exemplar,
			)
		case dto.MetricType_GAUGE:
			if metric.Gauge == nil {
				return written, fmt.Errorf(
					"expected gauge in metric %s %s", name, metric,
				)
			}
			n, err = writeOpenMetricsSample(
				w, name, "", metric, "", 0,
				metric.Gauge.GetValue(), 0, false,
				nil,
			)
		case dto.MetricType_UNTYPED:
			if metric.Untyped == nil {
				return written, fmt.Errorf(
					"expected untyped in metric %s %s", name, metric,
				)
			}
			n, err = writeOpenMetricsSample(
				w, name, "", metric, "", 0,
				metric.Untyped.GetValue(), 0, false,
				nil,
			)
		case dto.MetricType_SUMMARY:
			if metric.Summary == nil {
				return written, fmt.Errorf(
					"expected summary in metric %s %s", name, metric,
				)
			}
			for _, q := range metric.Summary.Quantile {
				n, err = writeOpenMetricsSample(
					w, name, "", metric,
					model.QuantileLabel, q.GetQuantile(),
					q.GetValue(), 0, false,
					nil,
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeOpenMetricsSample(
				w, name, "_sum", metric, "", 0,
				metric.Summary.GetSampleSum(), 0, false,
				nil,
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeOpenMetricsSample(
				w, name, "_count", metric, "", 0,
				0, metric.Summary.GetSampleCount(), true,
				nil,
			)
		case dto.MetricType_HISTOGRAM:
			if metric.Histogram == nil {
				return written, fmt.Errorf(
					"expected histogram in metric %s %s", name, metric,
				)
			}
			infSeen := false
			for _, b := range metric.Histogram.Bucket {
				n, err = writeOpenMetricsSample(
					w, name, "_bucket", metric,
					model.BucketLabel, b.GetUpperBound(),
					0, b.GetCumulativeCount(), true,
					b.Exemplar,
				)
				written += n
				if err != nil {
					return
				}
				if math.IsInf(b.GetUpperBound(), +1) {
					infSeen = true
				}
			}
			if !infSeen {
				n, err = writeOpenMetricsSample(
					w, name, "_bucket", metric,
					model.BucketLabel, math.Inf(+1),
					0, metric.Histogram.GetSampleCount(), true,
					nil,
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeOpenMetricsSample(
				w, name, "_sum", metric, "", 0,
				metric.Histogram.GetSampleSum(), 0, false,
				nil,
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeOpenMetricsSample(
				w, name, "_count", metric, "", 0,
				0, metric.Histogram.GetSampleCount(), true,
				nil,
			)
		default:
			return written, fmt.Errorf(
				"unexpected type in metric %s %s", name, metric,
			)
		}
		written += n
		if err != nil {
			return
		}
	}
	return
}

// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
	return w.Write([]byte("# EOF\n"))
}

// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
// w, given the metric name, the metric proto message itself, optionally an
// additional label name with a float64 value (use empty string as label name if
// not required), the value (optionally as float64 or uint64, determined by
// useIntValue), and optionally an exemplar (use nil if not required). The
// function returns the number of bytes written and any error encountered.
func writeOpenMetricsSample(
	w enhancedWriter,
	name, suffix string,
	metric *dto.Metric,
	additionalLabelName string, additionalLabelValue float64,
	floatValue float64, intValue uint64, useIntValue bool,
	exemplar *dto.Exemplar,
) (int, error) {
	var written int
	n, err := w.WriteString(name)
	written += n
	if err != nil {
		return written, err
	}
	if suffix != "" {
		n, err = w.WriteString(suffix)
		written += n
		if err != nil {
			return written, err
		}
	}
	n, err = writeOpenMetricsLabelPairs(
		w, metric.Label, additionalLabelName, additionalLabelValue,
	)
	written += n
	if err != nil {
		return written, err
	}
	err = w.WriteByte(' ')
	written++
	if err != nil {
		return written, err
	}
	if useIntValue {
		n, err = writeUint(w, intValue)
	} else {
		n, err = writeOpenMetricsFloat(w, floatValue)
	}
	written += n
	if err != nil {
		return written, err
	}
	if metric.TimestampMs != nil {
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return written, err
		}
		// TODO(beorn7): Format this directly without converting to a float first.
		n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
		written += n
		if err != nil {
			return written, err
		}
	}
	if exemplar != nil {
		n, err = writeExemplar(w, exemplar)
		written += n
		if err != nil {
			return written, err
		}
	}
	err = w.WriteByte('\n')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}

// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
// in OpenMetrics style.
func writeOpenMetricsLabelPairs(
	w enhancedWriter,
	in []*dto.LabelPair,
	additionalLabelName string, additionalLabelValue float64,
) (int, error) {
	if len(in) == 0 && additionalLabelName == "" {
		return 0, nil
	}
	var (
		written   int
		separator byte = '{'
	)
	for _, lp := range in {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(lp.GetName())
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		n, err = writeEscapedString(w, lp.GetValue(), true)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
		separator = ','
	}
	if additionalLabelName != "" {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(additionalLabelName)
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		n, err = writeOpenMetricsFloat(w, additionalLabelValue)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
	}
	err := w.WriteByte('}')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}

// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
	written := 0
	n, err := w.WriteString(" # ")
	written += n
	if err != nil {
		return written, err
	}
	n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
	written += n
	if err != nil {
		return written, err
	}
	err = w.WriteByte(' ')
	written++
	if err != nil {
		return written, err
	}
	n, err = writeOpenMetricsFloat(w, e.GetValue())
	written += n
	if err != nil {
		return written, err
	}
	if e.Timestamp != nil {
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return written, err
		}
		ts, err := ptypes.Timestamp((*e).Timestamp)
		if err != nil {
			return written, err
		}
		// TODO(beorn7): Format this directly from components of ts to
		// avoid overflow/underflow and precision issues of the float
		// conversion.
		n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
		written += n
		if err != nil {
			return written, err
		}
	}
	return written, nil
}

// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
// number would otherwise contain neither a "." nor an "e".
func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
	switch {
	case f == 1:
		return w.WriteString("1.0")
	case f == 0:
		return w.WriteString("0.0")
	case f == -1:
		return w.WriteString("-1.0")
	case math.IsNaN(f):
		return w.WriteString("NaN")
	case math.IsInf(f, +1):
		return w.WriteString("+Inf")
	case math.IsInf(f, -1):
		return w.WriteString("-Inf")
	default:
		bp := numBufPool.Get().(*[]byte)
		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
		if !bytes.ContainsAny(*bp, "e.") {
			*bp = append(*bp, '.', '0')
		}
		written, err := w.Write(*bp)
		numBufPool.Put(bp)
		return written, err
	}
}

// writeUint is like writeInt just for uint64.
func writeUint(w enhancedWriter, u uint64) (int, error) {
	bp := numBufPool.Get().(*[]byte)
	*bp = strconv.AppendUint((*bp)[:0], u, 10)
	written, err := w.Write(*bp)
	numBufPool.Put(bp)
	return written, err
}
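Aside (not part of the vendored file): a minimal sketch of how the two exported entry points above might be used together. The metric family below is hypothetical; per the doc comment, the caller appends the final `# EOF` line by calling FinalizeOpenMetrics once all families have been written:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// A hypothetical counter family; any populated *dto.MetricFamily works.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}

	var buf bytes.Buffer
	if _, err := expfmt.MetricFamilyToOpenMetrics(&buf, mf); err != nil {
		log.Fatal(err)
	}
	// OpenMetrics requires a trailing `# EOF` line after all families.
	if _, err := expfmt.FinalizeOpenMetrics(&buf); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
}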
21
vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
@@ -14,9 +14,10 @@
 package expfmt

 import (
-	"bytes"
+	"bufio"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"math"
 	"strconv"
 	"strings"
@@ -27,7 +28,7 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )

-// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
 // implements it.
 type enhancedWriter interface {
 	io.Writer
@@ -37,14 +38,13 @@ type enhancedWriter interface {
 }

 const (
-	initialBufSize = 512
 	initialNumBufSize = 24
 )

 var (
 	bufPool = sync.Pool{
 		New: func() interface{} {
-			return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+			return bufio.NewWriter(ioutil.Discard)
 		},
 	}
 	numBufPool = sync.Pool{
@@ -75,16 +75,14 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
 	}

 	// Try the interface upgrade. If it doesn't work, we'll use a
-	// bytes.Buffer from the sync.Pool and write out its content to out in a
-	// single go in the end.
+	// bufio.Writer from the sync.Pool.
 	w, ok := out.(enhancedWriter)
 	if !ok {
-		b := bufPool.Get().(*bytes.Buffer)
-		b.Reset()
+		b := bufPool.Get().(*bufio.Writer)
+		b.Reset(out)
 		w = b
 		defer func() {
-			bWritten, bErr := out.Write(b.Bytes())
-			written = bWritten
+			bErr := b.Flush()
 			if err == nil {
 				err = bErr
 			}
@@ -425,9 +423,8 @@ var (
 func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
 	if includeDoubleQuote {
 		return quotedEscaper.WriteString(w, v)
-	} else {
-		return escaper.WriteString(w, v)
 	}
+	return escaper.WriteString(w, v)
 }

 // writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
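Aside (not part of the vendored diff): the text_create.go change above replaces the pooled bytes.Buffer with a pooled bufio.Writer that is Reset onto the destination and flushed on exit, so output streams to out instead of being accumulated and copied at the end. A standalone sketch of that pool pattern, with writeGreeting standing in for an arbitrary encoder:

package main

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"sync"
)

// Pool of reusable writers, mirroring the bufPool introduced in the diff.
var bufPool = sync.Pool{
	New: func() interface{} {
		return bufio.NewWriter(ioutil.Discard)
	},
}

// writeGreeting borrows a bufio.Writer, retargets it at out, and flushes on exit.
func writeGreeting(out io.Writer) (err error) {
	b := bufPool.Get().(*bufio.Writer)
	b.Reset(out) // point the pooled writer at the real destination
	defer func() {
		if fErr := b.Flush(); err == nil {
			err = fErr
		}
		bufPool.Put(b)
	}()
	_, err = fmt.Fprintln(b, "hello")
	return err
}

func main() {
	if err := writeGreeting(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}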
Some files were not shown because too many files have changed in this diff.