Bump dependencies to Kubernetes 1.20

Author: xing-yang
Date: 2020-12-14 18:01:04 +00:00
Parent: a9a28d8fc7
Commit: e22364e1cf

799 changed files with 85231 additions and 28917 deletions


@@ -25,7 +25,7 @@ import (
 	"github.com/golang/mock/gomock"
 	"github.com/kubernetes-csi/csi-lib-utils/connection"
 	"github.com/kubernetes-csi/csi-lib-utils/metrics"
-	"github.com/kubernetes-csi/csi-test/driver"
+	"github.com/kubernetes-csi/csi-test/v4/driver"
 	"google.golang.org/grpc"
 )

go.mod (94 changed lines)

@@ -3,49 +3,69 @@ module github.com/kubernetes-csi/external-snapshotter/v4
 go 1.15
 
 require (
-	github.com/container-storage-interface/spec v1.2.0
+	github.com/container-storage-interface/spec v1.3.0
 	github.com/fsnotify/fsnotify v1.4.9
-	github.com/golang/mock v1.4.3
-	github.com/golang/protobuf v1.4.2
-	github.com/google/gofuzz v1.1.0
-	github.com/imdario/mergo v0.3.9 // indirect
+	github.com/go-logr/logr v0.3.0 // indirect
+	github.com/golang/mock v1.4.4
+	github.com/golang/protobuf v1.4.3
+	github.com/google/go-cmp v0.5.4 // indirect
+	github.com/google/gofuzz v1.2.0
+	github.com/googleapis/gnostic v0.5.3 // indirect
+	github.com/hashicorp/golang-lru v0.5.4 // indirect
+	github.com/imdario/mergo v0.3.11 // indirect
 	github.com/kubernetes-csi/csi-lib-utils v0.9.0
-	github.com/kubernetes-csi/csi-test v2.0.0+incompatible
+	github.com/kubernetes-csi/csi-test/v4 v4.0.2
 	github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0
-	github.com/prometheus/client_golang v1.7.1
+	github.com/prometheus/client_golang v1.8.0
 	github.com/prometheus/client_model v0.2.0
-	github.com/prometheus/common v0.10.0
-	github.com/spf13/cobra v1.0.0
-	google.golang.org/grpc v1.29.0
-	k8s.io/api v0.19.0
-	k8s.io/apimachinery v0.19.0
-	k8s.io/client-go v0.19.1
-	k8s.io/component-base v0.19.0
-	k8s.io/klog/v2 v2.3.0
-	k8s.io/kubernetes v1.19.0
+	github.com/prometheus/common v0.15.0
+	github.com/spf13/cobra v1.1.1
+	golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9 // indirect
+	golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 // indirect
+	golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 // indirect
+	golang.org/x/sys v0.0.0-20201214095126-aec9a390925b // indirect
+	golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
+	golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/genproto v0.0.0-20201211151036-40ec1c210f7a // indirect
+	google.golang.org/grpc v1.34.0
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	k8s.io/api v0.20.0
+	k8s.io/apimachinery v0.20.0
+	k8s.io/client-go v1.20.0
+	k8s.io/component-base v0.20.0
+	k8s.io/klog/v2 v2.4.0
+	k8s.io/kubernetes v1.20.0
 )
 
 replace (
 	github.com/kubernetes-csi/external-snapshotter/client/v4 => ./client
-	k8s.io/api => k8s.io/api v0.19.0
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.0
-	k8s.io/apimachinery => k8s.io/apimachinery v0.19.0
-	k8s.io/apiserver => k8s.io/apiserver v0.19.0
-	k8s.io/cli-runtime => k8s.io/cli-runtime v0.19.0
-	k8s.io/client-go => k8s.io/client-go v0.19.0
-	k8s.io/cloud-provider => k8s.io/cloud-provider v0.19.0
-	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.19.0
-	k8s.io/code-generator => k8s.io/code-generator v0.19.0
-	k8s.io/component-base => k8s.io/component-base v0.19.0
-	k8s.io/cri-api => k8s.io/cri-api v0.19.0
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.19.0
-	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.19.0
-	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.19.0
-	k8s.io/kube-proxy => k8s.io/kube-proxy v0.19.0
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.0
-	k8s.io/kubectl => k8s.io/kubectl v0.19.0
-	k8s.io/kubelet => k8s.io/kubelet v0.19.0
-	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.0
-	k8s.io/metrics => k8s.io/metrics v0.19.0
-	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.0
+	k8s.io/api => k8s.io/api v0.20.0
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.0
+	k8s.io/apimachinery => k8s.io/apimachinery v0.20.0
+	k8s.io/apiserver => k8s.io/apiserver v0.20.0
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.0
+	k8s.io/client-go => k8s.io/client-go v0.20.0
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.0
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.0
+	k8s.io/code-generator => k8s.io/code-generator v0.20.0
+	k8s.io/component-base => k8s.io/component-base v0.20.0
+	k8s.io/component-helpers => k8s.io/component-helpers v0.20.0
+	k8s.io/controller-manager => k8s.io/controller-manager v0.20.0
+	k8s.io/cri-api => k8s.io/cri-api v0.20.0
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.0
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.0
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.0
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.0
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.0
+	k8s.io/kubectl => k8s.io/kubectl v0.20.0
+	k8s.io/kubelet => k8s.io/kubelet v0.20.0
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.0
+	k8s.io/metrics => k8s.io/metrics v0.20.0
+	k8s.io/mount-utils => k8s.io/mount-utils v0.20.0
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.0
 )
+
+replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.0
+
+replace k8s.io/sample-controller => k8s.io/sample-controller v0.20.0

go.sum (555 changed lines): file diff suppressed because it is too large.


@@ -28,7 +28,7 @@ import (
 	"github.com/golang/protobuf/ptypes"
 	"github.com/kubernetes-csi/csi-lib-utils/connection"
 	"github.com/kubernetes-csi/csi-lib-utils/metrics"
-	"github.com/kubernetes-csi/csi-test/driver"
+	"github.com/kubernetes-csi/csi-test/v4/driver"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"

vendor/github.com/blang/semver/.travis.yml (new file, 21 lines, generated, vendored)

@@ -0,0 +1,21 @@
language: go
matrix:
  include:
  - go: 1.4.3
  - go: 1.5.4
  - go: 1.6.3
  - go: 1.7
  - go: tip
  allow_failures:
  - go: tip
install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
  -repotoken $COVERALLS_TOKEN
- echo "Build examples" ; cd examples && go build
- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
env:
  global:
    secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=


@@ -1,4 +1,4 @@
-semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
+semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
 ======
 
 semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
 
@@ -41,6 +41,7 @@ Features
 - Compare Helper Methods
 - InPlace manipulation
 - Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
+- Wildcards `>=1.x`, `<=2.5.x`
 - Sortable (implements sort.Interface)
 - database/sql compatible (sql.Scanner/Valuer)
 - encoding/json compatible (json.Marshaler/Unmarshaler)
 
@@ -59,6 +60,8 @@ A condition is composed of an operator and a version. The supported operators are:
 - `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
 - `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
+
+Note that spaces between the operator and the version will be gracefully tolerated.
 
 A `Range` can link multiple `Ranges` separated by space:
 Ranges can be linked by logical AND:
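
To make the range syntax documented above concrete, here is a small illustrative Go sketch against this vendored library; the versions and the range expression are arbitrary examples, not values taken from this repository:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// ParseRange compiles a range expression like the ones documented above.
	expectedRange, err := semver.ParseRange(">=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1")
	if err != nil {
		panic(err)
	}

	v := semver.MustParse("1.4.2")
	fmt.Println(expectedRange(v)) // true: 1.4.2 falls inside [1.0.0, 2.0.0)
}
```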


@@ -12,6 +12,6 @@
 	"license": "MIT",
 	"name": "semver",
 	"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
-	"version": "3.4.0"
+	"version": "3.5.1"
 }

File diff suppressed because it is too large.


@@ -41,6 +41,8 @@ There are implementations for the following logging libraries:
 - **log** (the Go standard library logger):
   [stdr](https://github.com/go-logr/stdr)
 - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
 
 # FAQ

vendor/github.com/go-logr/logr/discard.go (new file, 35 lines, generated, vendored)

@@ -0,0 +1,35 @@
package logr
// Discard returns a valid Logger that discards all messages logged to it.
// It can be used whenever the caller is not interested in the logs.
func Discard() Logger {
return discardLogger{}
}
// discardLogger is a Logger that discards all messages.
type discardLogger struct{}
func (l discardLogger) Enabled() bool {
return false
}
func (l discardLogger) Info(msg string, keysAndValues ...interface{}) {
}
func (l discardLogger) Error(err error, msg string, keysAndValues ...interface{}) {
}
func (l discardLogger) V(level int) Logger {
return l
}
func (l discardLogger) WithValues(keysAndValues ...interface{}) Logger {
return l
}
func (l discardLogger) WithName(name string) Logger {
return l
}
// Verify that it actually implements the interface
var _ Logger = discardLogger{}
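
The Discard helper added above gives callers a no-op Logger. A minimal usage sketch follows; the worker type is hypothetical and not part of this repository:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
)

// worker is a hypothetical component that accepts any logr.Logger.
type worker struct {
	log logr.Logger
}

func (w worker) run() {
	w.log.Info("starting", "attempt", 1) // dropped silently by the discard logger
	fmt.Println("work done")
}

func main() {
	// logr.Discard() returns a Logger that drops every message, which is
	// convenient in tests or when logging is optional.
	w := worker{log: logr.Discard()}
	w.run()
}
```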


@@ -40,17 +40,16 @@ limitations under the License.
 // we want to log that we've made some decision.
 //
 // With the traditional log package, we might write:
-//   log.Printf(
-//       "decided to set field foo to value %q for object %s/%s",
-//        targetValue, object.Namespace, object.Name)
+//   log.Printf("decided to set field foo to value %q for object %s/%s",
+//       targetValue, object.Namespace, object.Name)
 //
 // With logr's structured logging, we'd write:
-//   // elsewhere in the file, set up the logger to log with the prefix of "reconcilers",
-//   // and the named value target-type=Foo, for extra context.
+//   // elsewhere in the file, set up the logger to log with the prefix of
+//   // "reconcilers", and the named value target-type=Foo, for extra context.
 //   log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
 //
 //   // later on...
-//   log.Info("setting field foo on object", "value", targetValue, "object", object)
+//   log.Info("setting foo on object", "value", targetValue, "object", object)
 //
 // Depending on our logging implementation, we could then make logging decisions
 // based on field values (like only logging such events for objects in a certain
@@ -78,9 +77,9 @@ limitations under the License.
 // Each log message from a Logger has four types of context:
 // logger name, log verbosity, log message, and the named values.
 //
-// The Logger name constists of a series of name "segments" added by successive
+// The Logger name consists of a series of name "segments" added by successive
 // calls to WithName. These name segments will be joined in some way by the
-// underlying implementation. It is strongly reccomended that name segements
+// underlying implementation. It is strongly recommended that name segments
 // contain simple identifiers (letters, digits, and hyphen), and do not contain
 // characters that could muddle the log output or confuse the joining operation
 // (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc).
@@ -91,8 +90,8 @@ limitations under the License.
 // and log messages for users to filter on. It's illegal to pass a log level
 // below zero.
 //
-// The log message consists of a constant message attached to the the log line.
-// This should generally be a simple description of what's occuring, and should
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
 // never be a format string.
 //
 // Variable information can then be attached using named values (key/value
@@ -125,11 +124,25 @@ limitations under the License.
 //   - `"ts"`: the timestamp for a log line.
 //
 // Implementations are encouraged to make use of these keys to represent the
-// above concepts, when neccessary (for example, in a pure-JSON output form, it
+// above concepts, when necessary (for example, in a pure-JSON output form, it
 // would be necessary to represent at least message and timestamp as ordinary
 // named values).
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+//   // Underlier exposes access to the underlying logging implementation.
+//   // Since callers only have a logr.Logger, they have to know which
+//   // implementation is in use, so this interface is less of an abstraction
+//   // and more of way to test type conversion.
+//   type Underlier interface {
+//       GetUnderlying() <underlying-type>
+//   }
 package logr
+
+import (
+	"context"
+)
 
 // TODO: consider adding back in format strings if they're really needed
 // TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
 // TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats
@@ -171,8 +184,39 @@ type Logger interface {
 	// WithName adds a new element to the logger's name.
 	// Successive calls with WithName continue to append
-	// suffixes to the logger's name. It's strongly reccomended
+	// suffixes to the logger's name. It's strongly recommended
 	// that name segments contain only letters, digits, and hyphens
 	// (see the package documentation for more information).
 	WithName(name string) Logger
 }
+
+// InfoLogger provides compatibility with code that relies on the v0.1.0 interface
+// Deprecated: use Logger instead. This will be removed in a future release.
+type InfoLogger = Logger
+
+type contextKey struct{}
+
+// FromContext returns a Logger constructed from ctx or nil if no
+// logger details are found.
+func FromContext(ctx context.Context) Logger {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v
+	}
+
+	return nil
+}
+
+// FromContextOrDiscard returns a Logger constructed from ctx or a Logger
+// that discards all messages if no logger details are found.
+func FromContextOrDiscard(ctx context.Context) Logger {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v
+	}
+
+	return discardLogger{}
+}
+
+// NewContext returns a new context derived from ctx that embeds the Logger.
+func NewContext(ctx context.Context, l Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, l)
+}
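
A minimal sketch of how the context helpers introduced above can thread a Logger through call boundaries; the handle function is hypothetical:

```go
package main

import (
	"context"

	"github.com/go-logr/logr"
)

// handle recovers its Logger from the context; FromContextOrDiscard keeps it
// working even when no Logger was attached upstream.
func handle(ctx context.Context) {
	log := logr.FromContextOrDiscard(ctx)
	log.Info("handling request", "step", "validate")
}

func main() {
	// Attach a Logger (here the no-op discard logger) to a context and pass it down.
	ctx := logr.NewContext(context.Background(), logr.Discard())
	handle(ctx)
}
```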


@@ -765,7 +765,7 @@ func unescape(s string) (ch string, tail string, err error) {
 		if i > utf8.MaxRune {
 			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
 		}
-		return string(i), s, nil
+		return string(rune(i)), s, nil
 	}
 	return "", "", fmt.Errorf(`unknown escape \%c`, r)
 }
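
The change above is the Go-vet-flagged string(int) conversion: both spellings produce the same rune today, but the explicit rune conversion makes the intent clear and avoids the go1.15+ vet warning. A tiny standalone illustration, not part of the diff:

```go
package main

import "fmt"

func main() {
	i := 0x1F600 // an arbitrary Unicode code point (😀)

	// Converting an integer to string interprets it as a rune, not as decimal
	// digits. Since Go 1.15, `go vet` flags string(i) for non-rune integer
	// types, so string(rune(i)) is the preferred spelling.
	s := string(rune(i))
	fmt.Println(s, len(s)) // prints the emoji and its UTF-8 length in bytes (4)
}
```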


@@ -1,11 +1,15 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // Package cmp determines equality of values.
 //
 // This package is intended to be a more powerful and safer alternative to
 // reflect.DeepEqual for comparing whether two values are semantically equal.
+// It is intended to only be used in tests, as performance is not a goal and
+// it may panic if it cannot compare the values. Its propensity towards
+// panicking means that its unsuitable for production environments where a
+// spurious panic may be fatal.
 //
 // The primary features of cmp are:
 //
@@ -86,6 +90,52 @@ import (
 // If there is a cycle, then the pointed at values are considered equal
 // only if both addresses were previously visited in the same path step.
 func Equal(x, y interface{}, opts ...Option) bool {
+	s := newState(opts)
+	s.compareAny(rootStep(x, y))
+	return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values:
+// y - x. It returns an empty string if and only if Equal returns true for the
+// same input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix to indicates an element added from y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses fmt.Stringer.String or error.Error methods to produce more humanly
+// readable outputs. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
+//
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
+func Diff(x, y interface{}, opts ...Option) string {
+	s := newState(opts)
+
+	// Optimization: If there are no other reporters, we can optimize for the
+	// common case where the result is equal (and thus no reported difference).
+	// This avoids the expensive construction of a difference tree.
+	if len(s.reporters) == 0 {
+		s.compareAny(rootStep(x, y))
+		if s.result.Equal() {
+			return ""
+		}
+		s.result = diff.Result{} // Reset results
+	}
+
+	r := new(defaultReporter)
+	s.reporters = append(s.reporters, reporter{r})
+	s.compareAny(rootStep(x, y))
+	d := r.String()
+	if (d == "") != s.result.Equal() {
+		panic("inconsistent difference and equality results")
+	}
+	return d
+}
+
+// rootStep constructs the first path step. If x and y have differing types,
+// then they are stored within an empty interface type.
+func rootStep(x, y interface{}) PathStep {
 	vx := reflect.ValueOf(x)
 	vy := reflect.ValueOf(y)
@@ -108,33 +158,7 @@ func Equal(x, y interface{}, opts ...Option) bool {
 		t = vx.Type()
 	}
-
-	s := newState(opts)
-	s.compareAny(&pathStep{t, vx, vy})
-	return s.result.Equal()
-}
-
-// Diff returns a human-readable report of the differences between two values.
-// It returns an empty string if and only if Equal returns true for the same
-// input values and options.
-//
-// The output is displayed as a literal in pseudo-Go syntax.
-// At the start of each line, a "-" prefix indicates an element removed from x,
-// a "+" prefix to indicates an element added to y, and the lack of a prefix
-// indicates an element common to both x and y. If possible, the output
-// uses fmt.Stringer.String or error.Error methods to produce more humanly
-// readable outputs. In such cases, the string is prefixed with either an
-// 's' or 'e' character, respectively, to indicate that the method was called.
-//
-// Do not depend on this output being stable. If you need the ability to
-// programmatically interpret the difference, consider using a custom Reporter.
-func Diff(x, y interface{}, opts ...Option) string {
-	r := new(defaultReporter)
-	eq := Equal(x, y, Options(opts), Reporter(r))
-	d := r.String()
-	if (d == "") != eq {
-		panic("inconsistent difference and equality results")
-	}
-	return d
+	return &pathStep{t, vx, vy}
 }
 
 type state struct {
@@ -352,7 +376,7 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
 // assuming that T is assignable to R.
 // Otherwise, it returns the input value as is.
 func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
-	// TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143).
+	// TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143).
 	if !flags.AtLeastGo110 {
 		if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
 			return reflect.New(t).Elem()
@@ -362,6 +386,7 @@ func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
 }
 
 func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+	var addr bool
 	var vax, vay reflect.Value // Addressable versions of vx and vy
 	var mayForce, mayForceInit bool
@@ -383,6 +408,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
 			// For retrieveUnexportedField to work, the parent struct must
 			// be addressable. Create a new copy of the values if
 			// necessary to make them addressable.
+			addr = vx.CanAddr() || vy.CanAddr()
 			vax = makeAddressable(vx)
 			vay = makeAddressable(vy)
 		}
@@ -393,6 +419,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
 			mayForceInit = true
 		}
 		step.mayForce = mayForce
+		step.paddr = addr
 		step.pvx = vax
 		step.pvy = vay
 		step.field = t.Field(i)
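
Equal and Diff shown above are the public entry points of this vendored package. A minimal illustrative sketch follows; the snapshotSpec type is invented for the example:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// snapshotSpec is a made-up type used only to demonstrate cmp.Diff output.
type snapshotSpec struct {
	Name  string
	Size  int64
	Ready bool
}

func main() {
	want := snapshotSpec{Name: "snap-1", Size: 1 << 30, Ready: true}
	got := snapshotSpec{Name: "snap-1", Size: 1 << 30, Ready: false}

	// Diff reports "-" for elements only in want and "+" for elements only in
	// got, as described in the documentation above; it is empty exactly when
	// Equal would report true.
	if diff := cmp.Diff(want, got); diff != "" {
		fmt.Printf("unexpected spec (-want +got):\n%s", diff)
	}
}
```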


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // +build purego
 
@@ -10,6 +10,6 @@ import "reflect"
 const supportExporters = false
 
-func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value {
+func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
 	panic("no support for forcibly accessing unexported fields")
 }


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // +build !purego
 
@@ -17,9 +17,19 @@ const supportExporters = true
 // a struct such that the value has read-write permissions.
 //
 // The parent struct, v, must be addressable, while f must be a StructField
-// describing the field to retrieve.
-func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value {
-	// See https://github.com/google/go-cmp/issues/167 for discussion of the
-	// following expression.
-	return reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
+// describing the field to retrieve. If addr is false,
+// then the returned value will be shallowed copied to be non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
+	ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
+	if !addr {
+		// A field is addressable if and only if the struct is addressable.
+		// If the original parent value was not addressable, shallow copy the
+		// value to make it non-addressable to avoid leaking an implementation
+		// detail of how forcibly exporting a field works.
+		if ve.Kind() == reflect.Interface && ve.IsNil() {
+			return reflect.Zero(f.Type)
+		}
+		return reflect.ValueOf(ve.Interface()).Convert(f.Type)
+	}
+	return ve
 }


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // +build !cmp_debug


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // +build cmp_debug


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // Package diff implements an algorithm for producing edit-scripts.
 // The edit-script is a sequence of operations needed to transform one list
@@ -12,6 +12,13 @@
 // is more important than obtaining a minimal Levenshtein distance.
 package diff
 
+import (
+	"math/rand"
+	"time"
+
+	"github.com/google/go-cmp/cmp/internal/flags"
+)
+
 // EditType represents a single operation within an edit-script.
 type EditType uint8
@@ -112,6 +119,8 @@ func (r Result) Similar() bool {
 	return r.NumSame+1 >= r.NumDiff
 }
 
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
 // Difference reports whether two lists of lengths nx and ny are equal
 // given the definition of equality provided as f.
 //
@@ -177,6 +186,11 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
 	// approximately the square-root of the search budget.
 	searchBudget := 4 * (nx + ny) // O(n)
 
+	// Running the tests with the "cmp_debug" build tag prints a visualization
+	// of the algorithm running in real-time. This is educational for
+	// understanding how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
 	// The algorithm below is a greedy, meet-in-the-middle algorithm for
 	// computing sub-optimal edit-scripts between two lists.
 	//
@@ -194,20 +208,26 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
 	//    frontier towards the opposite corner.
 	// • This algorithm terminates when either the X coordinates or the
 	//    Y coordinates of the forward and reverse frontier points ever intersect.
+	//
 	// This algorithm is correct even if searching only in the forward direction
 	// or in the reverse direction. We do both because it is commonly observed
 	// that two lists commonly differ because elements were added to the front
 	// or end of the other list.
 	//
-	// Running the tests with the "cmp_debug" build tag prints a visualization
-	// of the algorithm running in real-time. This is educational for
-	// understanding how the algorithm works. See debug_enable.go.
-	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
-	for {
+	// Non-deterministically start with either the forward or reverse direction
+	// to introduce some deliberate instability so that we have the flexibility
+	// to change this algorithm in the future.
+	if flags.Deterministic || randBool {
+		goto forwardSearch
+	} else {
+		goto reverseSearch
+	}
+
+forwardSearch:
+	{
 		// Forward search from the beginning.
 		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
-			break
+			goto finishSearch
 		}
 		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
 			// Search in a diagonal pattern for a match.
@@ -242,10 +262,14 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
 		} else {
 			fwdFrontier.Y++
 		}
+		goto reverseSearch
+	}
+
+reverseSearch:
+	{
 		// Reverse search from the end.
 		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
-			break
+			goto finishSearch
 		}
 		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
 			// Search in a diagonal pattern for a match.
@@ -280,8 +304,10 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
 		} else {
 			revFrontier.Y--
 		}
+		goto forwardSearch
 	}
 
+finishSearch:
 	// Join the forward and reverse paths and then append the reverse path.
 	fwdPath.connect(revPath.point, f)
 	for i := len(revPath.es) - 1; i >= 0; i-- {


@@ -1,6 +1,6 @@
 // Copyright 2019, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 package flags


@@ -1,6 +1,6 @@
 // Copyright 2019, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // +build !go1.10


@@ -1,6 +1,6 @@
 // Copyright 2019, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // +build go1.10


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // Package function provides functionality for identifying function types.
 package function


@@ -0,0 +1,157 @@
// Copyright 2020, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package value
import (
"reflect"
"strconv"
)
// TypeString is nearly identical to reflect.Type.String,
// but has an additional option to specify that full type names be used.
func TypeString(t reflect.Type, qualified bool) string {
return string(appendTypeName(nil, t, qualified, false))
}
func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
// BUG: Go reflection provides no way to disambiguate two named types
// of the same name and within the same package,
// but declared within the namespace of different functions.
// Named type.
if t.Name() != "" {
if qualified && t.PkgPath() != "" {
b = append(b, '"')
b = append(b, t.PkgPath()...)
b = append(b, '"')
b = append(b, '.')
b = append(b, t.Name()...)
} else {
b = append(b, t.String()...)
}
return b
}
// Unnamed type.
switch k := t.Kind(); k {
case reflect.Bool, reflect.String, reflect.UnsafePointer,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
b = append(b, k.String()...)
case reflect.Chan:
if t.ChanDir() == reflect.RecvDir {
b = append(b, "<-"...)
}
b = append(b, "chan"...)
if t.ChanDir() == reflect.SendDir {
b = append(b, "<-"...)
}
b = append(b, ' ')
b = appendTypeName(b, t.Elem(), qualified, false)
case reflect.Func:
if !elideFunc {
b = append(b, "func"...)
}
b = append(b, '(')
for i := 0; i < t.NumIn(); i++ {
if i > 0 {
b = append(b, ", "...)
}
if i == t.NumIn()-1 && t.IsVariadic() {
b = append(b, "..."...)
b = appendTypeName(b, t.In(i).Elem(), qualified, false)
} else {
b = appendTypeName(b, t.In(i), qualified, false)
}
}
b = append(b, ')')
switch t.NumOut() {
case 0:
// Do nothing
case 1:
b = append(b, ' ')
b = appendTypeName(b, t.Out(0), qualified, false)
default:
b = append(b, " ("...)
for i := 0; i < t.NumOut(); i++ {
if i > 0 {
b = append(b, ", "...)
}
b = appendTypeName(b, t.Out(i), qualified, false)
}
b = append(b, ')')
}
case reflect.Struct:
b = append(b, "struct{ "...)
for i := 0; i < t.NumField(); i++ {
if i > 0 {
b = append(b, "; "...)
}
sf := t.Field(i)
if !sf.Anonymous {
if qualified && sf.PkgPath != "" {
b = append(b, '"')
b = append(b, sf.PkgPath...)
b = append(b, '"')
b = append(b, '.')
}
b = append(b, sf.Name...)
b = append(b, ' ')
}
b = appendTypeName(b, sf.Type, qualified, false)
if sf.Tag != "" {
b = append(b, ' ')
b = strconv.AppendQuote(b, string(sf.Tag))
}
}
if b[len(b)-1] == ' ' {
b = b[:len(b)-1]
} else {
b = append(b, ' ')
}
b = append(b, '}')
case reflect.Slice, reflect.Array:
b = append(b, '[')
if k == reflect.Array {
b = strconv.AppendUint(b, uint64(t.Len()), 10)
}
b = append(b, ']')
b = appendTypeName(b, t.Elem(), qualified, false)
case reflect.Map:
b = append(b, "map["...)
b = appendTypeName(b, t.Key(), qualified, false)
b = append(b, ']')
b = appendTypeName(b, t.Elem(), qualified, false)
case reflect.Ptr:
b = append(b, '*')
b = appendTypeName(b, t.Elem(), qualified, false)
case reflect.Interface:
b = append(b, "interface{ "...)
for i := 0; i < t.NumMethod(); i++ {
if i > 0 {
b = append(b, "; "...)
}
m := t.Method(i)
if qualified && m.PkgPath != "" {
b = append(b, '"')
b = append(b, m.PkgPath...)
b = append(b, '"')
b = append(b, '.')
}
b = append(b, m.Name...)
b = appendTypeName(b, m.Type, qualified, true)
}
if b[len(b)-1] == ' ' {
b = b[:len(b)-1]
} else {
b = append(b, ' ')
}
b = append(b, '}')
default:
panic("invalid kind: " + k.String())
}
return b
}


@@ -1,6 +1,6 @@
 // Copyright 2018, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // +build purego
 
@@ -21,3 +21,13 @@ func PointerOf(v reflect.Value) Pointer {
 	// assumes that the GC implementation does not use a moving collector.
 	return Pointer{v.Pointer(), v.Type()}
 }
+
+// IsNil reports whether the pointer is nil.
+func (p Pointer) IsNil() bool {
+	return p.p == 0
+}
+
+// Uintptr returns the pointer as a uintptr.
+func (p Pointer) Uintptr() uintptr {
+	return p.p
+}


@@ -1,6 +1,6 @@
 // Copyright 2018, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 // +build !purego
 
@@ -24,3 +24,13 @@ func PointerOf(v reflect.Value) Pointer {
 	// which is necessary if the GC ever uses a moving collector.
 	return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
 }
+
+// IsNil reports whether the pointer is nil.
+func (p Pointer) IsNil() bool {
+	return p.p == nil
+}
+
+// Uintptr returns the pointer as a uintptr.
+func (p Pointer) Uintptr() uintptr {
+	return uintptr(p.p)
+}


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 package value


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 package value


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 package cmp
 
@@ -225,11 +225,14 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
 	// Unable to Interface implies unexported field without visibility access.
 	if !vx.CanInterface() || !vy.CanInterface() {
-		const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
+		help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
 		var name string
 		if t := s.curPath.Index(-2).Type(); t.Name() != "" {
 			// Named type with unexported fields.
 			name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
+			if _, ok := reflect.New(t).Interface().(error); ok {
+				help = "consider using cmpopts.EquateErrors to compare error values"
+			}
 		} else {
 			// Unnamed type with unexported fields. Derive PkgPath from field.
 			var pkgPath string

@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 package cmp
 
@@ -177,7 +177,8 @@ type structField struct {
 	// pvx, pvy, and field are only valid if unexported is true.
 	unexported bool
 	mayForce   bool                // Forcibly allow visibility
-	pvx, pvy   reflect.Value       // Parent values
+	paddr      bool                // Was parent addressable?
+	pvx, pvy   reflect.Value       // Parent values (always addressible)
 	field      reflect.StructField // Field information
 }
 
@@ -189,8 +190,8 @@ func (sf StructField) Values() (vx, vy reflect.Value) {
 	// Forcibly obtain read-write access to an unexported struct field.
 	if sf.mayForce {
-		vx = retrieveUnexportedField(sf.pvx, sf.field)
-		vy = retrieveUnexportedField(sf.pvy, sf.field)
+		vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
+		vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
 		return vx, vy // CanInterface reports true
 	}
 	return sf.vx, sf.vy // CanInterface reports false


@@ -1,6 +1,6 @@
 // Copyright 2017, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 package cmp
 
@@ -41,7 +41,10 @@ func (r *defaultReporter) String() string {
 	if r.root.NumDiff == 0 {
 		return ""
 	}
-	return formatOptions{}.FormatDiff(r.root).String()
+	ptrs := new(pointerReferences)
+	text := formatOptions{}.FormatDiff(r.root, ptrs)
+	resolveReferences(text)
+	return text.String()
 }
 
 func assert(ok bool) {


@@ -1,6 +1,6 @@
 // Copyright 2019, The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
+// license that can be found in the LICENSE file.
 
 package cmp
 
@@ -11,14 +11,6 @@ import (
 	"github.com/google/go-cmp/cmp/internal/value"
 )
 
-// TODO: Enforce limits?
-// * Enforce maximum number of records to print per node?
-// * Enforce maximum size in bytes allowed?
-// * As a heuristic, use less verbosity for equal nodes than unequal nodes.
-// TODO: Enforce unique outputs?
-// * Avoid Stringer methods if it results in same output?
-// * Print pointer address if outputs still equal?
-
 // numContextRecords is the number of surrounding equal records to print.
 const numContextRecords = 2
@@ -71,24 +63,66 @@ func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
opts.TypeMode = t opts.TypeMode = t
return opts return opts
} }
func (opts formatOptions) WithVerbosity(level int) formatOptions {
opts.VerbosityLevel = level
opts.LimitVerbosity = true
return opts
}
func (opts formatOptions) verbosity() uint {
switch {
case opts.VerbosityLevel < 0:
return 0
case opts.VerbosityLevel > 16:
return 16 // some reasonable maximum to avoid shift overflow
default:
return uint(opts.VerbosityLevel)
}
}
const maxVerbosityPreset = 3
// verbosityPreset modifies the verbosity settings given an index
// between 0 and maxVerbosityPreset, inclusive.
func verbosityPreset(opts formatOptions, i int) formatOptions {
opts.VerbosityLevel = int(opts.verbosity()) + 2*i
if i > 0 {
opts.AvoidStringer = true
}
if i >= maxVerbosityPreset {
opts.PrintAddresses = true
opts.QualifiedNames = true
}
return opts
}
// FormatDiff converts a valueNode tree into a textNode tree, where the later // FormatDiff converts a valueNode tree into a textNode tree, where the later
// is a textual representation of the differences detected in the former. // is a textual representation of the differences detected in the former.
func (opts formatOptions) FormatDiff(v *valueNode) textNode { func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
if opts.DiffMode == diffIdentical {
opts = opts.WithVerbosity(1)
} else {
opts = opts.WithVerbosity(3)
}
// Check whether we have specialized formatting for this node. // Check whether we have specialized formatting for this node.
// This is not necessary, but helpful for producing more readable outputs. // This is not necessary, but helpful for producing more readable outputs.
if opts.CanFormatDiffSlice(v) { if opts.CanFormatDiffSlice(v) {
return opts.FormatDiffSlice(v) return opts.FormatDiffSlice(v)
} }
var parentKind reflect.Kind
if v.parent != nil && v.parent.TransformerName == "" {
parentKind = v.parent.Type.Kind()
}
// For leaf nodes, format the value based on the reflect.Values alone. // For leaf nodes, format the value based on the reflect.Values alone.
if v.MaxDepth == 0 { if v.MaxDepth == 0 {
switch opts.DiffMode { switch opts.DiffMode {
case diffUnknown, diffIdentical: case diffUnknown, diffIdentical:
// Format Equal. // Format Equal.
if v.NumDiff == 0 { if v.NumDiff == 0 {
outx := opts.FormatValue(v.ValueX, visitedPointers{}) outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
outy := opts.FormatValue(v.ValueY, visitedPointers{}) outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
if v.NumIgnored > 0 && v.NumSame == 0 { if v.NumIgnored > 0 && v.NumSame == 0 {
return textEllipsis return textEllipsis
} else if outx.Len() < outy.Len() { } else if outx.Len() < outy.Len() {
@@ -101,8 +135,13 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode {
// Format unequal. // Format unequal.
assert(opts.DiffMode == diffUnknown) assert(opts.DiffMode == diffUnknown)
var list textList var list textList
outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
}
if outx != nil { if outx != nil {
list = append(list, textRecord{Diff: '-', Value: outx}) list = append(list, textRecord{Diff: '-', Value: outx})
} }
@@ -111,34 +150,57 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode {
} }
return opts.WithTypeMode(emitType).FormatType(v.Type, list) return opts.WithTypeMode(emitType).FormatType(v.Type, list)
case diffRemoved: case diffRemoved:
return opts.FormatValue(v.ValueX, visitedPointers{}) return opts.FormatValue(v.ValueX, parentKind, ptrs)
case diffInserted: case diffInserted:
return opts.FormatValue(v.ValueY, visitedPointers{}) return opts.FormatValue(v.ValueY, parentKind, ptrs)
default: default:
panic("invalid diff mode") panic("invalid diff mode")
} }
} }
// Register slice element to support cycle detection.
if parentKind == reflect.Slice {
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
defer ptrs.Pop()
defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
}
// Descend into the child value node. // Descend into the child value node.
if v.TransformerName != "" { if v.TransformerName != "" {
out := opts.WithTypeMode(emitType).FormatDiff(v.Value) out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
return opts.FormatType(v.Type, out) return opts.FormatType(v.Type, out)
} else { } else {
switch k := v.Type.Kind(); k { switch k := v.Type.Kind(); k {
case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: case reflect.Struct, reflect.Array, reflect.Slice:
return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) out = opts.formatDiffList(v.Records, k, ptrs)
out = opts.FormatType(v.Type, out)
case reflect.Map:
// Register map to support cycle detection.
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
defer ptrs.Pop()
out = opts.formatDiffList(v.Records, k, ptrs)
out = wrapTrunkReferences(ptrRefs, out)
out = opts.FormatType(v.Type, out)
case reflect.Ptr: case reflect.Ptr:
return textWrap{"&", opts.FormatDiff(v.Value), ""} // Register pointer to support cycle detection.
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
defer ptrs.Pop()
out = opts.FormatDiff(v.Value, ptrs)
out = wrapTrunkReferences(ptrRefs, out)
out = &textWrap{Prefix: "&", Value: out}
case reflect.Interface: case reflect.Interface:
return opts.WithTypeMode(emitType).FormatDiff(v.Value) out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
default: default:
panic(fmt.Sprintf("%v cannot have children", k)) panic(fmt.Sprintf("%v cannot have children", k))
} }
return out
} }
} }
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
// Derive record name based on the data structure kind. // Derive record name based on the data structure kind.
var name string var name string
var formatKey func(reflect.Value) string var formatKey func(reflect.Value) string
@@ -154,7 +216,17 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
case reflect.Map: case reflect.Map:
name = "entry" name = "entry"
opts = opts.WithTypeMode(elideType) opts = opts.WithTypeMode(elideType)
formatKey = formatMapKey formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
}
maxLen := -1
if opts.LimitVerbosity {
if opts.DiffMode == diffIdentical {
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
} else {
maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
}
opts.VerbosityLevel--
} }
// Handle unification. // Handle unification.
@@ -163,6 +235,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
var list textList var list textList
var deferredEllipsis bool // Add final "..." to indicate records were dropped var deferredEllipsis bool // Add final "..." to indicate records were dropped
for _, r := range recs { for _, r := range recs {
if len(list) == maxLen {
deferredEllipsis = true
break
}
// Elide struct fields that are zero value. // Elide struct fields that are zero value.
if k == reflect.Struct { if k == reflect.Struct {
var isZero bool var isZero bool
@@ -186,23 +263,31 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
} }
continue continue
} }
if out := opts.FormatDiff(r.Value); out != nil { if out := opts.FormatDiff(r.Value, ptrs); out != nil {
list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
} }
} }
if deferredEllipsis { if deferredEllipsis {
list.AppendEllipsis(diffStats{}) list.AppendEllipsis(diffStats{})
} }
return textWrap{"{", list, "}"} return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
case diffUnknown: case diffUnknown:
default: default:
panic("invalid diff mode") panic("invalid diff mode")
} }
// Handle differencing. // Handle differencing.
var numDiffs int
var list textList var list textList
var keys []reflect.Value // invariant: len(list) == len(keys)
groups := coalesceAdjacentRecords(name, recs) groups := coalesceAdjacentRecords(name, recs)
maxGroup := diffStats{Name: name}
for i, ds := range groups { for i, ds := range groups {
if maxLen >= 0 && numDiffs >= maxLen {
maxGroup = maxGroup.Append(ds)
continue
}
// Handle equal records. // Handle equal records.
if ds.NumDiff() == 0 { if ds.NumDiff() == 0 {
// Compute the number of leading and trailing records to print. // Compute the number of leading and trailing records to print.
@@ -226,16 +311,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
// Format the equal values. // Format the equal values.
for _, r := range recs[:numLo] { for _, r := range recs[:numLo] {
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
keys = append(keys, r.Key)
} }
if numEqual > numLo+numHi { if numEqual > numLo+numHi {
ds.NumIdentical -= numLo + numHi ds.NumIdentical -= numLo + numHi
list.AppendEllipsis(ds) list.AppendEllipsis(ds)
for len(keys) < len(list) {
keys = append(keys, reflect.Value{})
}
} }
for _, r := range recs[numEqual-numHi : numEqual] { for _, r := range recs[numEqual-numHi : numEqual] {
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
keys = append(keys, r.Key)
} }
recs = recs[numEqual:] recs = recs[numEqual:]
continue continue
@@ -247,24 +337,70 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
case opts.CanFormatDiffSlice(r.Value): case opts.CanFormatDiffSlice(r.Value):
out := opts.FormatDiffSlice(r.Value) out := opts.FormatDiffSlice(r.Value)
list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
keys = append(keys, r.Key)
case r.Value.NumChildren == r.Value.MaxDepth: case r.Value.NumChildren == r.Value.MaxDepth:
outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
opts2 := verbosityPreset(opts, i)
outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
}
if outx != nil { if outx != nil {
list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
keys = append(keys, r.Key)
} }
if outy != nil { if outy != nil {
list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
keys = append(keys, r.Key)
} }
default: default:
out := opts.FormatDiff(r.Value) out := opts.FormatDiff(r.Value, ptrs)
list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
keys = append(keys, r.Key)
} }
} }
recs = recs[ds.NumDiff():] recs = recs[ds.NumDiff():]
numDiffs += ds.NumDiff()
} }
if maxGroup.IsZero() {
assert(len(recs) == 0) assert(len(recs) == 0)
return textWrap{"{", list, "}"} } else {
list.AppendEllipsis(maxGroup)
for len(keys) < len(list) {
keys = append(keys, reflect.Value{})
}
}
assert(len(list) == len(keys))
// For maps, the default formatting logic uses fmt.Stringer which may
// produce ambiguous output. Avoid calling String to disambiguate.
if k == reflect.Map {
var ambiguous bool
seenKeys := map[string]reflect.Value{}
for i, currKey := range keys {
if currKey.IsValid() {
strKey := list[i].Key
prevKey, seen := seenKeys[strKey]
if seen && prevKey.CanInterface() && currKey.CanInterface() {
ambiguous = prevKey.Interface() != currKey.Interface()
if ambiguous {
break
}
}
seenKeys[strKey] = currKey
}
}
if ambiguous {
for i, k := range keys {
if k.IsValid() {
list[i].Key = formatMapKey(k, true, ptrs)
}
}
}
}
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
} }
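The map-key disambiguation above only triggers when two distinct keys render to the same string. A minimal sketch of such a case (illustrative only, not part of the vendored diff; assumes go-cmp v0.5 behavior):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// Key's String method hides which value the key holds, so the default
// map-key formatting would print identical keys for distinct entries.
type Key struct{ ID int }

func (k Key) String() string { return "key" }

func main() {
	x := map[Key]int{{ID: 1}: 1, {ID: 2}: 2}
	y := map[Key]int{{ID: 1}: 1, {ID: 2}: 3}
	// When the stringified keys collide, the reporter re-formats them
	// without calling String, so the entries stay distinguishable.
	fmt.Println(cmp.Diff(x, y))
}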
// coalesceAdjacentRecords coalesces the list of records into groups of // coalesceAdjacentRecords coalesces the list of records into groups of

View File

@@ -0,0 +1,264 @@
// Copyright 2020, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp/internal/flags"
"github.com/google/go-cmp/cmp/internal/value"
)
const (
pointerDelimPrefix = "⟪"
pointerDelimSuffix = "⟫"
)
// formatPointer prints the address of the pointer.
func formatPointer(p value.Pointer, withDelims bool) string {
v := p.Uintptr()
if flags.Deterministic {
v = 0xdeadf00f // Only used for stable testing purposes
}
if withDelims {
return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
}
return formatHex(uint64(v))
}
// pointerReferences is a stack of pointers visited so far.
type pointerReferences [][2]value.Pointer
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
if deref && vx.IsValid() {
vx = vx.Addr()
}
if deref && vy.IsValid() {
vy = vy.Addr()
}
switch d {
case diffUnknown, diffIdentical:
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
case diffRemoved:
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
case diffInserted:
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
}
*ps = append(*ps, pp)
return pp
}
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
p = value.PointerOf(v)
for _, pp := range *ps {
if p == pp[0] || p == pp[1] {
return p, true
}
}
*ps = append(*ps, [2]value.Pointer{p, p})
return p, false
}
func (ps *pointerReferences) Pop() {
*ps = (*ps)[:len(*ps)-1]
}
// trunkReferences is metadata for a textNode indicating that the sub-tree
// represents the value for either pointer in a pair of references.
type trunkReferences struct{ pp [2]value.Pointer }
// trunkReference is metadata for a textNode indicating that the sub-tree
// represents the value for the given pointer reference.
type trunkReference struct{ p value.Pointer }
// leafReference is metadata for a textNode indicating that the value is
// truncated as it refers to another part of the tree (i.e., a trunk).
type leafReference struct{ p value.Pointer }
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
switch {
case pp[0].IsNil():
return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
case pp[1].IsNil():
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
case pp[0] == pp[1]:
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
default:
return &textWrap{Value: s, Metadata: trunkReferences{pp}}
}
}
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
var prefix string
if printAddress {
prefix = formatPointer(p, true)
}
return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
}
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
var prefix string
if printAddress {
prefix = formatPointer(p, true)
}
return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
}
// resolveReferences walks the textNode tree searching for any leaf reference
// metadata and resolves each against the corresponding trunk references.
// Since pointer addresses in memory are not particularly readable to the user,
// it replaces each pointer value with an arbitrary and unique reference ID.
func resolveReferences(s textNode) {
var walkNodes func(textNode, func(textNode))
walkNodes = func(s textNode, f func(textNode)) {
f(s)
switch s := s.(type) {
case *textWrap:
walkNodes(s.Value, f)
case textList:
for _, r := range s {
walkNodes(r.Value, f)
}
}
}
// Collect all trunks and leaves with reference metadata.
var trunks, leaves []*textWrap
walkNodes(s, func(s textNode) {
if s, ok := s.(*textWrap); ok {
switch s.Metadata.(type) {
case leafReference:
leaves = append(leaves, s)
case trunkReference, trunkReferences:
trunks = append(trunks, s)
}
}
})
// No leaf references to resolve.
if len(leaves) == 0 {
return
}
// Collect the set of all leaf references to resolve.
leafPtrs := make(map[value.Pointer]bool)
for _, leaf := range leaves {
leafPtrs[leaf.Metadata.(leafReference).p] = true
}
// Collect the set of trunk pointers that are always paired together.
// This allows us to assign a single ID to both pointers for brevity.
// If a pointer in a pair ever occurs by itself or as a different pair,
// then the pair is broken.
pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
unpair := func(p value.Pointer) {
if !pairedTrunkPtrs[p].IsNil() {
pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
}
pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
}
for _, trunk := range trunks {
switch p := trunk.Metadata.(type) {
case trunkReference:
unpair(p.p) // standalone pointer cannot be part of a pair
case trunkReferences:
p0, ok0 := pairedTrunkPtrs[p.pp[0]]
p1, ok1 := pairedTrunkPtrs[p.pp[1]]
switch {
case !ok0 && !ok1:
// Register the newly seen pair.
pairedTrunkPtrs[p.pp[0]] = p.pp[1]
pairedTrunkPtrs[p.pp[1]] = p.pp[0]
case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
// Exact pair already seen; do nothing.
default:
// Pair conflicts with some other pair; break all pairs.
unpair(p.pp[0])
unpair(p.pp[1])
}
}
}
// Correlate each pointer referenced by leaves to a unique identifier,
// and print the IDs for each trunk that matches those pointers.
var nextID uint
ptrIDs := make(map[value.Pointer]uint)
newID := func() uint {
id := nextID
nextID++
return id
}
for _, trunk := range trunks {
switch p := trunk.Metadata.(type) {
case trunkReference:
if print := leafPtrs[p.p]; print {
id, ok := ptrIDs[p.p]
if !ok {
id = newID()
ptrIDs[p.p] = id
}
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
}
case trunkReferences:
print0 := leafPtrs[p.pp[0]]
print1 := leafPtrs[p.pp[1]]
if print0 || print1 {
id0, ok0 := ptrIDs[p.pp[0]]
id1, ok1 := ptrIDs[p.pp[1]]
isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
if isPair {
var id uint
assert(ok0 == ok1) // must be seen together or not at all
if ok0 {
assert(id0 == id1) // must have the same ID
id = id0
} else {
id = newID()
ptrIDs[p.pp[0]] = id
ptrIDs[p.pp[1]] = id
}
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
} else {
if print0 && !ok0 {
id0 = newID()
ptrIDs[p.pp[0]] = id0
}
if print1 && !ok1 {
id1 = newID()
ptrIDs[p.pp[1]] = id1
}
switch {
case print0 && print1:
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
case print0:
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
case print1:
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
}
}
}
}
}
// Update all leaf references with the unique identifier.
for _, leaf := range leaves {
if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
}
}
}
func formatReference(id uint) string {
return fmt.Sprintf("ref#%d", id)
}
func updateReferencePrefix(prefix, ref string) string {
if prefix == "" {
return pointerDelimPrefix + ref + pointerDelimSuffix
}
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
return pointerDelimPrefix + ref + ": " + suffix
}
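A minimal sketch of the cycle handling that the reference machinery above supports (illustrative only, not part of the vendored diff; assumes go-cmp's cycle detection introduced in v0.4+):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// Node forms a self-referential structure.
type Node struct {
	Name string
	Next *Node
}

func main() {
	x := &Node{Name: "a"}
	x.Next = x // cycle back to itself
	y := &Node{Name: "b"}
	y.Next = y // cycle back to itself
	// Instead of recursing forever, the reporter truncates the repeated
	// pointer as a leaf reference and labels the matching trunk node with
	// the same ref ID.
	fmt.Println(cmp.Diff(x, y))
}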

View File

@@ -1,17 +1,18 @@
// Copyright 2019, The Go Authors. All rights reserved. // Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file. // license that can be found in the LICENSE file.
package cmp package cmp
import ( import (
"bytes"
"fmt" "fmt"
"reflect" "reflect"
"strconv" "strconv"
"strings" "strings"
"unicode" "unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp/internal/flags"
"github.com/google/go-cmp/cmp/internal/value" "github.com/google/go-cmp/cmp/internal/value"
) )
@@ -20,14 +21,22 @@ type formatValueOptions struct {
// methods like error.Error or fmt.Stringer.String. // methods like error.Error or fmt.Stringer.String.
AvoidStringer bool AvoidStringer bool
// ShallowPointers controls whether to avoid descending into pointers.
// Useful when printing map keys, where pointer comparison is performed
// on the pointer address rather than the pointed-at value.
ShallowPointers bool
// PrintAddresses controls whether to print the address of all pointers, // PrintAddresses controls whether to print the address of all pointers,
// slice elements, and maps. // slice elements, and maps.
PrintAddresses bool PrintAddresses bool
// QualifiedNames controls whether FormatType uses the fully qualified name
// (including the full package path as opposed to just the package name).
QualifiedNames bool
// VerbosityLevel controls the amount of output to produce.
// A higher value produces more output. A value of zero or lower produces
// no output (represented using an ellipsis).
// If LimitVerbosity is false, then the level is treated as infinite.
VerbosityLevel int
// LimitVerbosity specifies that formatting should respect VerbosityLevel.
LimitVerbosity bool
} }
// FormatType prints the type as if it were wrapping s. // FormatType prints the type as if it were wrapping s.
@@ -44,12 +53,15 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
default: default:
return s return s
} }
if opts.DiffMode == diffIdentical {
return s // elide type for identical nodes
}
case elideType: case elideType:
return s return s
} }
// Determine the type label, applying special handling for unnamed types. // Determine the type label, applying special handling for unnamed types.
typeName := t.String() typeName := value.TypeString(t, opts.QualifiedNames)
if t.Name() == "" { if t.Name() == "" {
// According to Go grammar, certain type literals contain symbols that // According to Go grammar, certain type literals contain symbols that
// do not strongly bind to the next lexicographical token (e.g., *T). // do not strongly bind to the next lexicographical token (e.g., *T).
@@ -57,39 +69,77 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
case reflect.Chan, reflect.Func, reflect.Ptr: case reflect.Chan, reflect.Func, reflect.Ptr:
typeName = "(" + typeName + ")" typeName = "(" + typeName + ")"
} }
typeName = strings.Replace(typeName, "struct {", "struct{", -1) }
typeName = strings.Replace(typeName, "interface {", "interface{", -1) return &textWrap{Prefix: typeName, Value: wrapParens(s)}
}
// wrapParens wraps s with a set of parenthesis, but avoids it if the
// wrapped node itself is already surrounded by a pair of parenthesis or braces.
// It handles unwrapping one level of pointer-reference nodes.
func wrapParens(s textNode) textNode {
var refNode *textWrap
if s2, ok := s.(*textWrap); ok {
// Unwrap a single pointer reference node.
switch s2.Metadata.(type) {
case leafReference, trunkReference, trunkReferences:
refNode = s2
if s3, ok := refNode.Value.(*textWrap); ok {
s2 = s3
}
} }
// Avoid wrap the value in parenthesis if unnecessary. // Already has delimiters that make parenthesis unnecessary.
if s, ok := s.(textWrap); ok { hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}")
if hasParens || hasBraces { if hasParens || hasBraces {
return textWrap{typeName, s, ""} return s
} }
} }
return textWrap{typeName + "(", s, ")"} if refNode != nil {
refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
return s
}
return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
} }
// FormatValue prints the reflect.Value, taking extra care to avoid descending // FormatValue prints the reflect.Value, taking extra care to avoid descending
// into pointers already in m. As pointers are visited, m is also updated. // into pointers already in ptrs. As pointers are visited, ptrs is also updated.
func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
if !v.IsValid() { if !v.IsValid() {
return nil return nil
} }
t := v.Type() t := v.Type()
// Check slice element for cycles.
if parentKind == reflect.Slice {
ptrRef, visited := ptrs.Push(v.Addr())
if visited {
return makeLeafReference(ptrRef, false)
}
defer ptrs.Pop()
defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
}
// Check whether there is an Error or String method to call. // Check whether there is an Error or String method to call.
if !opts.AvoidStringer && v.CanInterface() { if !opts.AvoidStringer && v.CanInterface() {
// Avoid calling Error or String methods on nil receivers since many // Avoid calling Error or String methods on nil receivers since many
// implementations crash when doing so. // implementations crash when doing so.
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
var prefix, strVal string
func() {
// Swallow and ignore any panics from String or Error.
defer func() { recover() }()
switch v := v.Interface().(type) { switch v := v.Interface().(type) {
case error: case error:
return textLine("e" + formatString(v.Error())) strVal = v.Error()
prefix = "e"
case fmt.Stringer: case fmt.Stringer:
return textLine("s" + formatString(v.String())) strVal = v.String()
prefix = "s"
}
}()
if prefix != "" {
return opts.formatString(prefix, strVal)
} }
} }
} }
@@ -102,94 +152,140 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t
} }
}() }()
var ptr string
switch t.Kind() { switch t.Kind() {
case reflect.Bool: case reflect.Bool:
return textLine(fmt.Sprint(v.Bool())) return textLine(fmt.Sprint(v.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return textLine(fmt.Sprint(v.Int())) return textLine(fmt.Sprint(v.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
// Unnamed uints are usually bytes or words, so use hexadecimal. return textLine(fmt.Sprint(v.Uint()))
if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { case reflect.Uint8:
if parentKind == reflect.Slice || parentKind == reflect.Array {
return textLine(formatHex(v.Uint())) return textLine(formatHex(v.Uint()))
} }
return textLine(fmt.Sprint(v.Uint())) return textLine(fmt.Sprint(v.Uint()))
case reflect.Uintptr:
return textLine(formatHex(v.Uint()))
case reflect.Float32, reflect.Float64: case reflect.Float32, reflect.Float64:
return textLine(fmt.Sprint(v.Float())) return textLine(fmt.Sprint(v.Float()))
case reflect.Complex64, reflect.Complex128: case reflect.Complex64, reflect.Complex128:
return textLine(fmt.Sprint(v.Complex())) return textLine(fmt.Sprint(v.Complex()))
case reflect.String: case reflect.String:
return textLine(formatString(v.String())) return opts.formatString("", v.String())
case reflect.UnsafePointer, reflect.Chan, reflect.Func: case reflect.UnsafePointer, reflect.Chan, reflect.Func:
return textLine(formatPointer(v)) return textLine(formatPointer(value.PointerOf(v), true))
case reflect.Struct: case reflect.Struct:
var list textList var list textList
v := makeAddressable(v) // needed for retrieveUnexportedField
maxLen := v.NumField()
if opts.LimitVerbosity {
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
opts.VerbosityLevel--
}
for i := 0; i < v.NumField(); i++ { for i := 0; i < v.NumField(); i++ {
vv := v.Field(i) vv := v.Field(i)
if value.IsZero(vv) { if value.IsZero(vv) {
continue // Elide fields with zero values continue // Elide fields with zero values
} }
s := opts.WithTypeMode(autoType).FormatValue(vv, m) if len(list) == maxLen {
list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) list.AppendEllipsis(diffStats{})
break
} }
return textWrap{"{", list, "}"} sf := t.Field(i)
if supportExporters && !isExported(sf.Name) {
vv = retrieveUnexportedField(v, sf, true)
}
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
list = append(list, textRecord{Key: sf.Name, Value: s})
}
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
case reflect.Slice: case reflect.Slice:
if v.IsNil() { if v.IsNil() {
return textNil return textNil
} }
if opts.PrintAddresses {
ptr = formatPointer(v) // Check whether this is a []byte of text data.
if t.Elem() == reflect.TypeOf(byte(0)) {
b := v.Bytes()
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) }
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
out = opts.formatString("", string(b))
return opts.WithTypeMode(emitType).FormatType(t, out)
} }
}
fallthrough fallthrough
case reflect.Array: case reflect.Array:
maxLen := v.Len()
if opts.LimitVerbosity {
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
opts.VerbosityLevel--
}
var list textList var list textList
for i := 0; i < v.Len(); i++ { for i := 0; i < v.Len(); i++ {
vi := v.Index(i) if len(list) == maxLen {
if vi.CanAddr() { // Check for cyclic elements list.AppendEllipsis(diffStats{})
p := vi.Addr() break
if m.Visit(p) {
var out textNode
out = textLine(formatPointer(p))
out = opts.WithTypeMode(emitType).FormatType(p.Type(), out)
out = textWrap{"*", out, ""}
list = append(list, textRecord{Value: out})
continue
} }
} s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
s := opts.WithTypeMode(elideType).FormatValue(vi, m)
list = append(list, textRecord{Value: s}) list = append(list, textRecord{Value: s})
} }
return textWrap{ptr + "{", list, "}"}
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
if t.Kind() == reflect.Slice && opts.PrintAddresses {
header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
}
return out
case reflect.Map: case reflect.Map:
if v.IsNil() { if v.IsNil() {
return textNil return textNil
} }
if m.Visit(v) {
return textLine(formatPointer(v))
}
// Check pointer for cycles.
ptrRef, visited := ptrs.Push(v)
if visited {
return makeLeafReference(ptrRef, opts.PrintAddresses)
}
defer ptrs.Pop()
maxLen := v.Len()
if opts.LimitVerbosity {
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
opts.VerbosityLevel--
}
var list textList var list textList
for _, k := range value.SortKeys(v.MapKeys()) { for _, k := range value.SortKeys(v.MapKeys()) {
sk := formatMapKey(k) if len(list) == maxLen {
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) list.AppendEllipsis(diffStats{})
break
}
sk := formatMapKey(k, false, ptrs)
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
list = append(list, textRecord{Key: sk, Value: sv}) list = append(list, textRecord{Key: sk, Value: sv})
} }
if opts.PrintAddresses {
ptr = formatPointer(v) out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
} out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
return textWrap{ptr + "{", list, "}"} return out
case reflect.Ptr: case reflect.Ptr:
if v.IsNil() { if v.IsNil() {
return textNil return textNil
} }
if m.Visit(v) || opts.ShallowPointers {
return textLine(formatPointer(v)) // Check pointer for cycles.
} ptrRef, visited := ptrs.Push(v)
if opts.PrintAddresses { if visited {
ptr = formatPointer(v) out = makeLeafReference(ptrRef, opts.PrintAddresses)
return &textWrap{Prefix: "&", Value: out}
} }
defer ptrs.Pop()
skipType = true // Let the underlying value print the type instead skipType = true // Let the underlying value print the type instead
return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
out = &textWrap{Prefix: "&", Value: out}
return out
case reflect.Interface: case reflect.Interface:
if v.IsNil() { if v.IsNil() {
return textNil return textNil
@@ -197,19 +293,67 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t
// Interfaces accept different concrete types, // Interfaces accept different concrete types,
// so configure the underlying value to explicitly print the type. // so configure the underlying value to explicitly print the type.
skipType = true // Print the concrete type instead skipType = true // Print the concrete type instead
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
default: default:
panic(fmt.Sprintf("%v kind not handled", v.Kind())) panic(fmt.Sprintf("%v kind not handled", v.Kind()))
} }
} }
func (opts formatOptions) formatString(prefix, s string) textNode {
maxLen := len(s)
maxLines := strings.Count(s, "\n") + 1
if opts.LimitVerbosity {
maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
}
// For multiline strings, use the triple-quote syntax,
// but only use it when printing removed or inserted nodes since
// we only want the extra verbosity for those cases.
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
for i := 0; i < len(lines) && isTripleQuoted; i++ {
lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
isPrintable := func(r rune) bool {
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
}
line := lines[i]
isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
}
if isTripleQuoted {
var list textList
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
for i, line := range lines {
if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
comment := commentString(fmt.Sprintf("%d elided lines", numElided))
list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
break
}
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
}
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
}
// Format the string as a single-line quoted string.
if len(s) > maxLen+len(textEllipsis) {
return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
}
return textLine(prefix + formatString(s))
}
// formatMapKey formats v as if it were a map key. // formatMapKey formats v as if it were a map key.
// The result is guaranteed to be a single line. // The result is guaranteed to be a single line.
func formatMapKey(v reflect.Value) string { func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
var opts formatOptions var opts formatOptions
opts.DiffMode = diffIdentical
opts.TypeMode = elideType opts.TypeMode = elideType
opts.ShallowPointers = true opts.PrintAddresses = disambiguate
s := opts.FormatValue(v, visitedPointers{}).String() opts.AvoidStringer = disambiguate
opts.QualifiedNames = disambiguate
opts.VerbosityLevel = maxVerbosityPreset
opts.LimitVerbosity = true
s := opts.FormatValue(v, reflect.Map, ptrs).String()
return strings.TrimSpace(s) return strings.TrimSpace(s)
} }
@@ -227,7 +371,7 @@ func formatString(s string) string {
rawInvalid := func(r rune) bool { rawInvalid := func(r rune) bool {
return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
} }
if strings.IndexFunc(s, rawInvalid) < 0 { if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
return "`" + s + "`" return "`" + s + "`"
} }
return qs return qs
@@ -256,23 +400,3 @@ func formatHex(u uint64) string {
} }
return fmt.Sprintf(f, u) return fmt.Sprintf(f, u)
} }
// formatPointer prints the address of the pointer.
func formatPointer(v reflect.Value) string {
p := v.Pointer()
if flags.Deterministic {
p = 0xdeadf00f // Only used for stable testing purposes
}
return fmt.Sprintf("⟪0x%x⟫", p)
}
type visitedPointers map[value.Pointer]struct{}
// Visit inserts pointer v into the visited map and reports whether it had
// already been visited before.
func (m visitedPointers) Visit(v reflect.Value) bool {
p := value.PointerOf(v)
_, visited := m[p]
m[p] = struct{}{}
return visited
}

View File

@@ -1,6 +1,6 @@
// Copyright 2019, The Go Authors. All rights reserved. // Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file. // license that can be found in the LICENSE file.
package cmp package cmp
@@ -8,6 +8,7 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"reflect" "reflect"
"strconv"
"strings" "strings"
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
@@ -23,11 +24,25 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
return false // Must be formatting in diff mode return false // Must be formatting in diff mode
case v.NumDiff == 0: case v.NumDiff == 0:
return false // No differences detected return false // No differences detected
case v.NumIgnored+v.NumCompared+v.NumTransformed > 0:
// TODO: Handle the case where someone uses bytes.Equal on a large slice.
return false // Some custom option was used to determined equality
case !v.ValueX.IsValid() || !v.ValueY.IsValid(): case !v.ValueX.IsValid() || !v.ValueY.IsValid():
return false // Both values must be valid return false // Both values must be valid
case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0):
return false // Both slice values have to be non-empty
case v.NumIgnored > 0:
return false // Some ignore option was used
case v.NumTransformed > 0:
return false // Some transform option was used
case v.NumCompared > 1:
return false // More than one comparison was used
case v.NumCompared == 1 && v.Type.Name() != "":
// The need for cmp to check applicability of options on every element
// in a slice is a significant performance detriment for large []byte.
// The workaround is to specify Comparer(bytes.Equal),
// which enables cmp to compare []byte more efficiently.
// If they differ, we still want to provide batched diffing.
// The logic disallows named types since they tend to have their own
// String method, with nicer formatting than what this provides.
return false
} }
switch t := v.Type; t.Kind() { switch t := v.Type; t.Kind() {
@@ -82,7 +97,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
} }
if isText || isBinary { if isText || isBinary {
var numLines, lastLineIdx, maxLineLen int var numLines, lastLineIdx, maxLineLen int
isBinary = false isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy)
for i, r := range sx + sy { for i, r := range sx + sy {
if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
isBinary = true isBinary = true
@@ -97,7 +112,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
} }
} }
isText = !isBinary isText = !isBinary
isLinedText = isText && numLines >= 4 && maxLineLen <= 256 isLinedText = isText && numLines >= 4 && maxLineLen <= 1024
} }
// Format the string into printable records. // Format the string into printable records.
@@ -117,6 +132,83 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
}, },
) )
delim = "\n" delim = "\n"
// If possible, use a custom triple-quote (""") syntax for printing
// differences in a string literal. This format is more readable,
// but has edge-cases where differences are visually indistinguishable.
// This format is avoided under the following conditions:
// • A line starts with `"""`
// • A line starts with "..."
// • A line contains non-printable characters
// • Adjacent different lines differ only by whitespace
//
// For example:
// """
// ... // 3 identical lines
// foo
// bar
// - baz
// + BAZ
// """
isTripleQuoted := true
prevRemoveLines := map[string]bool{}
prevInsertLines := map[string]bool{}
var list2 textList
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
for _, r := range list {
if !r.Value.Equal(textEllipsis) {
line, _ := strconv.Unquote(string(r.Value.(textLine)))
line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
normLine := strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return -1 // drop whitespace to avoid visually indistinguishable output
}
return r
}, line)
isPrintable := func(r rune) bool {
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
}
isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
switch r.Diff {
case diffRemoved:
isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
prevRemoveLines[normLine] = true
case diffInserted:
isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
prevInsertLines[normLine] = true
}
if !isTripleQuoted {
break
}
r.Value = textLine(line)
r.ElideComma = true
}
if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
prevRemoveLines = map[string]bool{}
prevInsertLines = map[string]bool{}
}
list2 = append(list2, r)
}
if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
list2 = list2[:len(list2)-1] // elide single empty line at the end
}
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
if isTripleQuoted {
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
switch t.Kind() {
case reflect.String:
if t != reflect.TypeOf(string("")) {
out = opts.FormatType(t, out)
}
case reflect.Slice:
// Always emit type for slices since the triple-quote syntax
// looks like a string (not a slice).
opts = opts.WithTypeMode(emitType)
out = opts.FormatType(t, out)
}
return out
}
// If the text appears to be single-lined text, // If the text appears to be single-lined text,
// then perform differencing in approximately fixed-sized chunks. // then perform differencing in approximately fixed-sized chunks.
// The output is printed as quoted strings. // The output is printed as quoted strings.
@@ -129,6 +221,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
}, },
) )
delim = "" delim = ""
// If the text appears to be binary data, // If the text appears to be binary data,
// then perform differencing in approximately fixed-sized chunks. // then perform differencing in approximately fixed-sized chunks.
// The output is inspired by hexdump. // The output is inspired by hexdump.
@@ -145,6 +238,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
return textRecord{Diff: d, Value: textLine(s), Comment: comment} return textRecord{Diff: d, Value: textLine(s), Comment: comment}
}, },
) )
// For all other slices of primitive types, // For all other slices of primitive types,
// then perform differencing in approximately fixed-sized chunks. // then perform differencing in approximately fixed-sized chunks.
// The size of each chunk depends on the width of the element kind. // The size of each chunk depends on the width of the element kind.
@@ -172,7 +266,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
switch t.Elem().Kind() { switch t.Elem().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
ss = append(ss, fmt.Sprint(v.Index(i).Int())) ss = append(ss, fmt.Sprint(v.Index(i).Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
case reflect.Uint8, reflect.Uintptr:
ss = append(ss, formatHex(v.Index(i).Uint())) ss = append(ss, formatHex(v.Index(i).Uint()))
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
ss = append(ss, fmt.Sprint(v.Index(i).Interface())) ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
@@ -185,7 +281,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
} }
// Wrap the output with appropriate type information. // Wrap the output with appropriate type information.
var out textNode = textWrap{"{", list, "}"} var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
if !isText { if !isText {
// The "{...}" byte-sequence literal is not valid Go syntax for strings. // The "{...}" byte-sequence literal is not valid Go syntax for strings.
// Emit the type for extra clarity (e.g. "string{...}"). // Emit the type for extra clarity (e.g. "string{...}").
@@ -196,12 +292,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
} }
switch t.Kind() { switch t.Kind() {
case reflect.String: case reflect.String:
out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
if t != reflect.TypeOf(string("")) { if t != reflect.TypeOf(string("")) {
out = opts.FormatType(t, out) out = opts.FormatType(t, out)
} }
case reflect.Slice: case reflect.Slice:
out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
if t != reflect.TypeOf([]byte(nil)) { if t != reflect.TypeOf([]byte(nil)) {
out = opts.FormatType(t, out) out = opts.FormatType(t, out)
} }
@@ -242,9 +338,22 @@ func (opts formatOptions) formatDiffSlice(
return n0 - v.Len() return n0 - v.Len()
} }
var numDiffs int
maxLen := -1
if opts.LimitVerbosity {
maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
opts.VerbosityLevel--
}
groups := coalesceAdjacentEdits(name, es) groups := coalesceAdjacentEdits(name, es)
groups = coalesceInterveningIdentical(groups, chunkSize/4) groups = coalesceInterveningIdentical(groups, chunkSize/4)
maxGroup := diffStats{Name: name}
for i, ds := range groups { for i, ds := range groups {
if maxLen >= 0 && numDiffs >= maxLen {
maxGroup = maxGroup.Append(ds)
continue
}
// Print equal. // Print equal.
if ds.NumDiff() == 0 { if ds.NumDiff() == 0 {
// Compute the number of leading and trailing equal bytes to print. // Compute the number of leading and trailing equal bytes to print.
@@ -273,12 +382,18 @@ func (opts formatOptions) formatDiffSlice(
} }
// Print unequal. // Print unequal.
len0 := len(list)
nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
vx = vx.Slice(nx, vx.Len()) vx = vx.Slice(nx, vx.Len())
ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
vy = vy.Slice(ny, vy.Len()) vy = vy.Slice(ny, vy.Len())
numDiffs += len(list) - len0
} }
if maxGroup.IsZero() {
assert(vx.Len() == 0 && vy.Len() == 0) assert(vx.Len() == 0 && vy.Len() == 0)
} else {
list.AppendEllipsis(maxGroup)
}
return list return list
} }
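A minimal sketch of input that exercises the triple-quote path described above (illustrative only; the exact report layout may vary by version):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := "alpha\nbeta\ngamma\ndelta\n"
	y := "alpha\nbeta\nGAMMA\ndelta\n"
	// With four or more printable lines, the string difference is rendered
	// using the """ syntax, with -/+ markers on the changed lines.
	fmt.Println(cmp.Diff(x, y))
}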

View File

@@ -1,6 +1,6 @@
// Copyright 2019, The Go Authors. All rights reserved. // Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file. // license that can be found in the LICENSE file.
package cmp package cmp
@@ -10,12 +10,15 @@ import (
"math/rand" "math/rand"
"strings" "strings"
"time" "time"
"unicode/utf8"
"github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/flags"
) )
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
const maxColumnLength = 80
type indentMode int type indentMode int
func (n indentMode) appendIndent(b []byte, d diffMode) []byte { func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
@@ -94,18 +97,19 @@ type textWrap struct {
Prefix string // e.g., "bytes.Buffer{" Prefix string // e.g., "bytes.Buffer{"
Value textNode // textWrap | textList | textLine Value textNode // textWrap | textList | textLine
Suffix string // e.g., "}" Suffix string // e.g., "}"
Metadata interface{} // arbitrary metadata; has no effect on formatting
} }
func (s textWrap) Len() int { func (s *textWrap) Len() int {
return len(s.Prefix) + s.Value.Len() + len(s.Suffix) return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
} }
func (s1 textWrap) Equal(s2 textNode) bool { func (s1 *textWrap) Equal(s2 textNode) bool {
if s2, ok := s2.(textWrap); ok { if s2, ok := s2.(*textWrap); ok {
return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
} }
return false return false
} }
func (s textWrap) String() string { func (s *textWrap) String() string {
var d diffMode var d diffMode
var n indentMode var n indentMode
_, s2 := s.formatCompactTo(nil, d) _, s2 := s.formatCompactTo(nil, d)
@@ -114,7 +118,7 @@ func (s textWrap) String() string {
b = append(b, '\n') // Trailing newline b = append(b, '\n') // Trailing newline
return string(b) return string(b)
} }
func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
n0 := len(b) // Original buffer length n0 := len(b) // Original buffer length
b = append(b, s.Prefix...) b = append(b, s.Prefix...)
b, s.Value = s.Value.formatCompactTo(b, d) b, s.Value = s.Value.formatCompactTo(b, d)
@@ -124,7 +128,7 @@ func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
} }
return b, s return b, s
} }
func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
b = append(b, s.Prefix...) b = append(b, s.Prefix...)
b = s.Value.formatExpandedTo(b, d, n) b = s.Value.formatExpandedTo(b, d, n)
b = append(b, s.Suffix...) b = append(b, s.Suffix...)
@@ -139,6 +143,7 @@ type textRecord struct {
Diff diffMode // e.g., 0 or '-' or '+' Diff diffMode // e.g., 0 or '-' or '+'
Key string // e.g., "MyField" Key string // e.g., "MyField"
Value textNode // textWrap | textLine Value textNode // textWrap | textLine
ElideComma bool // avoid trailing comma
Comment fmt.Stringer // e.g., "6 identical fields" Comment fmt.Stringer // e.g., "6 identical fields"
} }
@@ -146,12 +151,12 @@ type textRecord struct {
// exists at the end. If cs is non-zero it coalesces the statistics with the // exists at the end. If cs is non-zero it coalesces the statistics with the
// previous diffStats. // previous diffStats.
func (s *textList) AppendEllipsis(ds diffStats) { func (s *textList) AppendEllipsis(ds diffStats) {
hasStats := ds != diffStats{} hasStats := !ds.IsZero()
if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
if hasStats { if hasStats {
*s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
} else { } else {
*s = append(*s, textRecord{Value: textEllipsis}) *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
} }
return return
} }
@@ -191,7 +196,7 @@ func (s1 textList) Equal(s2 textNode) bool {
} }
func (s textList) String() string { func (s textList) String() string {
return textWrap{"{", s, "}"}.String() return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
} }
func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
@@ -221,7 +226,7 @@ func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
} }
// Force multi-lined output when printing a removed/inserted node that // Force multi-lined output when printing a removed/inserted node that
// is sufficiently long. // is sufficiently long.
if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
multiLine = true multiLine = true
} }
if !multiLine { if !multiLine {
@@ -236,16 +241,50 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
_, isLine := r.Value.(textLine) _, isLine := r.Value.(textLine)
return r.Key == "" || !isLine return r.Key == "" || !isLine
}, },
func(r textRecord) int { return len(r.Key) }, func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
) )
alignValueLens := s.alignLens( alignValueLens := s.alignLens(
func(r textRecord) bool { func(r textRecord) bool {
_, isLine := r.Value.(textLine) _, isLine := r.Value.(textLine)
return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
}, },
func(r textRecord) int { return len(r.Value.(textLine)) }, func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
) )
// Format lists of simple lists in a batched form.
// If the list is a sequence of only textLine values,
// then batch multiple values on a single line.
var isSimple bool
for _, r := range s {
_, isLine := r.Value.(textLine)
isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
if !isSimple {
break
}
}
if isSimple {
n++
var batch []byte
emitBatch := func() {
if len(batch) > 0 {
b = n.appendIndent(append(b, '\n'), d)
b = append(b, bytes.TrimRight(batch, " ")...)
batch = batch[:0]
}
}
for _, r := range s {
line := r.Value.(textLine)
if len(batch)+len(line)+len(", ") > maxColumnLength {
emitBatch()
}
batch = append(batch, line...)
batch = append(batch, ", "...)
}
emitBatch()
n--
return n.appendIndent(append(b, '\n'), d)
}
// Format the list as a multi-lined output. // Format the list as a multi-lined output.
n++ n++
for i, r := range s { for i, r := range s {
@@ -256,7 +295,7 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
b = alignKeyLens[i].appendChar(b, ' ') b = alignKeyLens[i].appendChar(b, ' ')
b = r.Value.formatExpandedTo(b, d|r.Diff, n) b = r.Value.formatExpandedTo(b, d|r.Diff, n)
if !r.Value.Equal(textEllipsis) { if !r.ElideComma {
b = append(b, ',') b = append(b, ',')
} }
b = alignValueLens[i].appendChar(b, ' ') b = alignValueLens[i].appendChar(b, ' ')
@@ -332,6 +371,11 @@ type diffStats struct {
NumModified int NumModified int
} }
func (s diffStats) IsZero() bool {
s.Name = ""
return s == diffStats{}
}
func (s diffStats) NumDiff() int { func (s diffStats) NumDiff() int {
return s.NumRemoved + s.NumInserted + s.NumModified return s.NumRemoved + s.NumInserted + s.NumModified
} }

View File

@@ -1,6 +1,6 @@
// Copyright 2019, The Go Authors. All rights reserved. // Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file. // license that can be found in the LICENSE file.
package cmp package cmp

View File

@@ -1,13 +1,10 @@
language: go language: go
go: go:
- 1.4 - 1.11.x
- 1.3 - 1.12.x
- 1.2 - 1.13.x
- tip - master
install:
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
script: script:
- go test -cover - go test -cover

View File

@@ -1,7 +1,7 @@
# How to contribute # # How to contribute #
We'd love to accept your patches and contributions to this project. There are We'd love to accept your patches and contributions to this project. There are
a just a few small guidelines you need to follow. just a few small guidelines you need to follow.
## Contributor License Agreement ## ## Contributor License Agreement ##

View File

@@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
See more examples in ```example_test.go```. See more examples in ```example_test.go```.
You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing.
go-fuzz provides the user with a byte slice, which should be converted to different inputs
for the tested function. This library can help convert the byte slice. Consider, for
example, a fuzz test for the function `mypackage.MyFunc` that takes an int argument:
```go
// +build gofuzz
package mypackage
import fuzz "github.com/google/gofuzz"
func Fuzz(data []byte) int {
var i int
fuzz.NewFromGoFuzz(data).Fuzz(&i)
MyFunc(i)
return 0
}
```
Happy testing! Happy testing!

View File

@@ -0,0 +1,81 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package bytesource provides a rand.Source64 that is determined by a slice of bytes.
package bytesource
import (
"bytes"
"encoding/binary"
"io"
"math/rand"
)
// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are
// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a
// fallback pseudo random source is created in case more random numbers are required.
// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly.
type ByteSource struct {
*bytes.Reader
fallback rand.Source
}
// New returns a new ByteSource from a given slice of bytes.
func New(input []byte) *ByteSource {
s := &ByteSource{
Reader: bytes.NewReader(input),
fallback: rand.NewSource(0),
}
if len(input) > 0 {
s.fallback = rand.NewSource(int64(s.consumeUint64()))
}
return s
}
func (s *ByteSource) Uint64() uint64 {
// Return from input if it was not exhausted.
if s.Len() > 0 {
return s.consumeUint64()
}
// Input was exhausted, return random number from fallback (in this case fallback should not be
// nil). Try first having a Uint64 output (Should work in current rand implementation),
// otherwise return a conversion of Int63.
if s64, ok := s.fallback.(rand.Source64); ok {
return s64.Uint64()
}
return uint64(s.fallback.Int63())
}
func (s *ByteSource) Int63() int64 {
return int64(s.Uint64() >> 1)
}
func (s *ByteSource) Seed(seed int64) {
s.fallback = rand.NewSource(seed)
s.Reader = bytes.NewReader(nil)
}
// consumeUint64 reads 8 bytes from the input and converts them to a uint64. It assumes that the
// bytes reader is not empty.
func (s *ByteSource) consumeUint64() uint64 {
var bytes [8]byte
_, err := s.Read(bytes[:])
if err != nil && err != io.EOF {
panic("failed reading source") // Should not happen.
}
return binary.BigEndian.Uint64(bytes[:])
}
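A minimal usage sketch for the new bytesource package (illustrative only, not part of the vendored diff):

package main

import (
	"fmt"
	"math/rand"

	"github.com/google/gofuzz/bytesource"
)

func main() {
	// The first 8 bytes seed the fallback source; the remaining bytes are
	// consumed directly, after which the fallback takes over.
	src := bytesource.New([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})
	r := rand.New(src)
	fmt.Println(r.Uint64(), r.Uint64())
}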

View File

@@ -22,6 +22,9 @@ import (
"reflect" "reflect"
"regexp" "regexp"
"time" "time"
"github.com/google/gofuzz/bytesource"
"strings"
) )
// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
@@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer {
return f return f
} }
// NewFromGoFuzz is a helper function that enables using gofuzz (this
// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
// fuzzing. Essentially, it enables translating the fuzzing bytes from
// go-fuzz to any Go object using this library.
//
// This implementation promises a constant translation from a given slice of
// bytes to the fuzzed objects. This promise will remain over future
// versions of Go and of this library.
//
// Note: the returned Fuzzer should not be shared between multiple goroutines,
// as its deterministic output will no longer be available.
//
// Example: use go-fuzz to test the function `MyFunc(int)` in the package
// `mypackage`. Add the file: "mypackage_fuzz.go" with the content:
//
// // +build gofuzz
// package mypackage
// import fuzz "github.com/google/gofuzz"
// func Fuzz(data []byte) int {
// var i int
// fuzz.NewFromGoFuzz(data).Fuzz(&i)
// MyFunc(i)
// return 0
// }
func NewFromGoFuzz(data []byte) *Fuzzer {
return New().RandSource(bytesource.New(data))
}
// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. // Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
// //
// Each entry in fuzzFuncs must be a function taking two parameters. // Each entry in fuzzFuncs must be a function taking two parameters.
@@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int {
} }
func (f *Fuzzer) genShouldFill() bool { func (f *Fuzzer) genShouldFill() bool {
return f.r.Float64() > f.nilChance return f.r.Float64() >= f.nilChance
} }
// MaxDepth sets the maximum number of recursive fuzz calls that will be made // MaxDepth sets the maximum number of recursive fuzz calls that will be made
@@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
fn(v, fc.fuzzer.r) fn(v, fc.fuzzer.r)
return return
} }
switch v.Kind() { switch v.Kind() {
case reflect.Map: case reflect.Map:
if fc.fuzzer.genShouldFill() { if fc.fuzzer.genShouldFill() {
@@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
v.SetFloat(r.Float64()) v.SetFloat(r.Float64())
}, },
reflect.Complex64: func(v reflect.Value, r *rand.Rand) { reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
panic("unimplemented") v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
}, },
reflect.Complex128: func(v reflect.Value, r *rand.Rand) { reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
panic("unimplemented") v.SetComplex(complex(r.Float64(), r.Float64()))
}, },
reflect.String: func(v reflect.Value, r *rand.Rand) { reflect.String: func(v reflect.Value, r *rand.Rand) {
v.SetString(randString(r)) v.SetString(randString(r))
@@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
// randBool returns true or false randomly. // randBool returns true or false randomly.
func randBool(r *rand.Rand) bool { func randBool(r *rand.Rand) bool {
if r.Int()&1 == 1 { return r.Int31()&(1<<30) == 0
return true
}
return false
} }
type charRange struct { type int63nPicker interface {
first, last rune Int63n(int64) int64
} }
// UnicodeRange describes a sequential range of unicode characters.
// Last must be numerically greater than First.
type UnicodeRange struct {
First, Last rune
}
// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
// To be useful, each range must have at least one character (First <= Last) and
// there must be at least one range.
type UnicodeRanges []UnicodeRange
// choose returns a random unicode character from the given range, using the // choose returns a random unicode character from the given range, using the
// given randomness source. // given randomness source.
func (r *charRange) choose(rand *rand.Rand) rune { func (ur UnicodeRange) choose(r int63nPicker) rune {
count := int64(r.last - r.first) count := int64(ur.Last - ur.First + 1)
return r.first + rune(rand.Int63n(count)) return ur.First + rune(r.Int63n(count))
} }
var unicodeRanges = []charRange{ // CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
// Each character is selected from the range ur. If there are no characters
// in the range (ur.Last < ur.First), this will panic.
func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
ur.check()
return func(s *string, c Continue) {
*s = ur.randString(c.Rand)
}
}
// check verifies that ur (UnicodeRange) is well-formed; it panics if the
// first rune is numerically greater than the last one.
func (ur UnicodeRange) check() {
if ur.Last < ur.First {
panic("The last encoding must be greater than the first one.")
}
}
// randString of UnicodeRange makes a random string up to 20 characters long.
// Each character is selected from ur (UnicodeRange).
func (ur UnicodeRange) randString(r *rand.Rand) string {
n := r.Intn(20)
sb := strings.Builder{}
sb.Grow(n)
for i := 0; i < n; i++ {
sb.WriteRune(ur.choose(r))
}
return sb.String()
}
// defaultUnicodeRanges is the default set of unicode ranges used when the
// user does not set CustomStringFuzzFunc() but still wants fuzzed strings.
var defaultUnicodeRanges = UnicodeRanges{
{' ', '~'}, // ASCII characters {' ', '~'}, // ASCII characters
{'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u00a0', '\u02af'}, // Multi-byte encoded characters
{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
} }
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
// Each character is selected from one of the ranges of ur(UnicodeRanges).
// Each range has an equal probability of being chosen. If there are no ranges,
// or a selected range has no characters (.Last < .First), this will panic.
// Do not modify any of the ranges in ur after calling this function.
func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
// Check unicode ranges slice is empty.
if len(ur) == 0 {
panic("UnicodeRanges is empty.")
}
// if not empty, each range should be checked.
for i := range ur {
ur[i].check()
}
return func(s *string, c Continue) {
*s = ur.randString(c.Rand)
}
}
// randString of UnicodeRanges makes a random string up to 20 characters long.
// Each character is selected from one of the ranges of ur (UnicodeRanges),
// and each range has an equal probability of being chosen.
func (ur UnicodeRanges) randString(r *rand.Rand) string {
n := r.Intn(20)
sb := strings.Builder{}
sb.Grow(n)
for i := 0; i < n; i++ {
sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
}
return sb.String()
}
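The UnicodeRange and UnicodeRanges APIs above are meant to be registered as custom fuzz functions. A minimal usage sketch, assuming the library is imported from github.com/google/gofuzz as package fuzz and registered through Fuzzer.Funcs (this example is not part of the vendored file itself):
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

func main() {
	// Restrict fuzzed strings to lowercase ASCII letters only.
	lower := fuzz.UnicodeRange{First: 'a', Last: 'z'}

	// CustomStringFuzzFunc returns a func(*string, fuzz.Continue) that
	// Funcs accepts as a custom fuzzer for string values.
	f := fuzz.New().Funcs(lower.CustomStringFuzzFunc())

	var s string
	f.Fuzz(&s) // s is now 0-19 runes long, all within ['a', 'z']
	fmt.Println(s)
}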
// randString makes a random string up to 20 characters long. The returned string // randString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings. // may include a variety of (valid) UTF-8 encodings.
func randString(r *rand.Rand) string { func randString(r *rand.Rand) string {
n := r.Intn(20) return defaultUnicodeRanges.randString(r)
runes := make([]rune, n)
for i := range runes {
runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
}
return string(runes)
} }
// randUint64 makes random 64 bit numbers. // randUint64 makes random 64 bit numbers.

View File

@@ -1,3 +1,4 @@
# Compiler support code # Compiler support code
This directory contains compiler support code used by Gnostic and Gnostic extensions. This directory contains compiler support code used by Gnostic and Gnostic
extensions.

View File

@@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -14,30 +14,36 @@
package compiler package compiler
import (
yaml "gopkg.in/yaml.v3"
)
// Context contains state of the compiler as it traverses a document. // Context contains state of the compiler as it traverses a document.
type Context struct { type Context struct {
Parent *Context Parent *Context
Name string Name string
Node *yaml.Node
ExtensionHandlers *[]ExtensionHandler ExtensionHandlers *[]ExtensionHandler
} }
// NewContextWithExtensions returns a new object representing the compiler state // NewContextWithExtensions returns a new object representing the compiler state
func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers}
} }
// NewContext returns a new object representing the compiler state // NewContext returns a new object representing the compiler state
func NewContext(name string, parent *Context) *Context { func NewContext(name string, node *yaml.Node, parent *Context) *Context {
if parent != nil { if parent != nil {
return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
} }
return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
} }
// Description returns a text description of the compiler state // Description returns a text description of the compiler state
func (context *Context) Description() string { func (context *Context) Description() string {
name := context.Name
if context.Parent != nil { if context.Parent != nil {
return context.Parent.Description() + "." + context.Name name = context.Parent.Description() + "." + name
} }
return context.Name return name
} }

View File

@@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@
package compiler package compiler
import "fmt"
// Error represents compiler errors and their location in the document. // Error represents compiler errors and their location in the document.
type Error struct { type Error struct {
Context *Context Context *Context
@@ -25,12 +27,19 @@ func NewError(context *Context, message string) *Error {
return &Error{Context: context, Message: message} return &Error{Context: context, Message: message}
} }
func (err *Error) locationDescription() string {
if err.Context.Node != nil {
return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description())
}
return err.Context.Description()
}
// Error returns the string value of an Error. // Error returns the string value of an Error.
func (err *Error) Error() string { func (err *Error) Error() string {
if err.Context == nil { if err.Context == nil {
return "ERROR " + err.Message return err.Message
} }
return "ERROR " + err.Context.Description() + " " + err.Message return err.locationDescription() + " " + err.Message
} }
// ErrorGroup is a container for groups of Error values. // ErrorGroup is a container for groups of Error values.

View File

@@ -1,101 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"bytes"
"fmt"
"os/exec"
"strings"
"errors"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
ext_plugin "github.com/googleapis/gnostic/extensions"
yaml "gopkg.in/yaml.v2"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
type ExtensionHandler struct {
Name string
}
// HandleExtension calls a binary extension handler.
func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
handled := false
var errFromPlugin error
var outFromPlugin *any.Any
if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
if outFromPlugin == nil {
continue
} else {
handled = true
break
}
}
}
return handled, outFromPlugin, errFromPlugin
}
func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
if extensionHandlers.Name != "" {
binary, _ := yaml.Marshal(in)
request := &ext_plugin.ExtensionHandlerRequest{}
version := &ext_plugin.Version{}
version.Major = 0
version.Minor = 1
version.Patch = 0
request.CompilerVersion = version
request.Wrapper = &ext_plugin.Wrapper{}
request.Wrapper.Version = "v2"
request.Wrapper.Yaml = string(binary)
request.Wrapper.ExtensionName = extensionName
requestBytes, _ := proto.Marshal(request)
cmd := exec.Command(extensionHandlers.Name)
cmd.Stdin = bytes.NewReader(requestBytes)
output, err := cmd.Output()
if err != nil {
fmt.Printf("Error: %+v\n", err)
return nil, err
}
response := &ext_plugin.ExtensionHandlerResponse{}
err = proto.Unmarshal(output, response)
if err != nil {
fmt.Printf("Error: %+v\n", err)
fmt.Printf("%s\n", string(output))
return nil, err
}
if !response.Handled {
return nil, nil
}
if len(response.Error) != 0 {
message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
return nil, errors.New(message)
}
return response.Value, nil
}
return nil, nil
}

View File

@@ -0,0 +1,85 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"bytes"
"fmt"
"os/exec"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
extensions "github.com/googleapis/gnostic/extensions"
yaml "gopkg.in/yaml.v3"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
type ExtensionHandler struct {
Name string
}
// CallExtension calls a binary extension handler.
func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
if context == nil || context.ExtensionHandlers == nil {
return false, nil, nil
}
handled = false
for _, handler := range *(context.ExtensionHandlers) {
response, err = handler.handle(in, extensionName)
if response == nil {
continue
} else {
handled = true
break
}
}
return handled, response, err
}
func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
if extensionHandlers.Name != "" {
yamlData, _ := yaml.Marshal(in)
request := &extensions.ExtensionHandlerRequest{
CompilerVersion: &extensions.Version{
Major: 0,
Minor: 1,
Patch: 0,
},
Wrapper: &extensions.Wrapper{
Version: "unknown", // TODO: set this to the type/version of spec being parsed.
Yaml: string(yamlData),
ExtensionName: extensionName,
},
}
requestBytes, _ := proto.Marshal(request)
cmd := exec.Command(extensionHandlers.Name)
cmd.Stdin = bytes.NewReader(requestBytes)
output, err := cmd.Output()
if err != nil {
return nil, err
}
response := &extensions.ExtensionHandlerResponse{}
err = proto.Unmarshal(output, response)
if err != nil || !response.Handled {
return nil, err
}
if len(response.Errors) != 0 {
return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ","))
}
return response.Value, nil
}
return nil, nil
}

View File

@@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -16,56 +16,63 @@ package compiler
import ( import (
"fmt" "fmt"
"gopkg.in/yaml.v2"
"regexp" "regexp"
"sort" "sort"
"strconv" "strconv"
"github.com/googleapis/gnostic/jsonschema"
"gopkg.in/yaml.v3"
) )
// compiler helper functions, usually called from generated code // compiler helper functions, usually called from generated code
// UnpackMap gets a yaml.MapSlice if possible. // UnpackMap gets a *yaml.Node if possible.
func UnpackMap(in interface{}) (yaml.MapSlice, bool) { func UnpackMap(in *yaml.Node) (*yaml.Node, bool) {
m, ok := in.(yaml.MapSlice) if in == nil {
if ok {
return m, true
}
// do we have an empty array?
a, ok := in.([]interface{})
if ok && len(a) == 0 {
// if so, return an empty map
return yaml.MapSlice{}, true
}
return nil, false return nil, false
}
return in, true
} }
// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. // SortedKeysForMap returns the sorted keys of a mapping *yaml.Node.
func SortedKeysForMap(m yaml.MapSlice) []string { func SortedKeysForMap(m *yaml.Node) []string {
keys := make([]string, 0) keys := make([]string, 0)
for _, item := range m { if m.Kind == yaml.MappingNode {
keys = append(keys, item.Key.(string)) for i := 0; i < len(m.Content); i += 2 {
keys = append(keys, m.Content[i].Value)
}
} }
sort.Strings(keys) sort.Strings(keys)
return keys return keys
} }
// MapHasKey returns true if a yaml.MapSlice contains a specified key. // MapHasKey returns true if a mapping *yaml.Node contains a specified key.
func MapHasKey(m yaml.MapSlice, key string) bool { func MapHasKey(m *yaml.Node, key string) bool {
for _, item := range m { if m == nil {
itemKey, ok := item.Key.(string) return false
if ok && key == itemKey { }
if m.Kind == yaml.MappingNode {
for i := 0; i < len(m.Content); i += 2 {
itemKey := m.Content[i].Value
if key == itemKey {
return true return true
} }
} }
}
return false return false
} }
// MapValueForKey gets the value of a map value for a specified key. // MapValueForKey gets the value of a map value for a specified key.
func MapValueForKey(m yaml.MapSlice, key string) interface{} { func MapValueForKey(m *yaml.Node, key string) *yaml.Node {
for _, item := range m { if m == nil {
itemKey, ok := item.Key.(string) return nil
if ok && key == itemKey { }
return item.Value if m.Kind == yaml.MappingNode {
for i := 0; i < len(m.Content); i += 2 {
itemKey := m.Content[i].Value
if key == itemKey {
return m.Content[i+1]
}
} }
} }
return nil return nil
@@ -83,8 +90,118 @@ func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
return stringArray return stringArray
} }
// SequenceNodeForNode returns a node if it is a SequenceNode.
func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) {
if node.Kind != yaml.SequenceNode {
return nil, false
}
return node, true
}
// BoolForScalarNode returns the bool value of a node.
func BoolForScalarNode(node *yaml.Node) (bool, bool) {
if node == nil {
return false, false
}
if node.Kind == yaml.DocumentNode {
return BoolForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return false, false
}
if node.Tag != "!!bool" {
return false, false
}
v, err := strconv.ParseBool(node.Value)
if err != nil {
return false, false
}
return v, true
}
// IntForScalarNode returns the integer value of a node.
func IntForScalarNode(node *yaml.Node) (int64, bool) {
if node == nil {
return 0, false
}
if node.Kind == yaml.DocumentNode {
return IntForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return 0, false
}
if node.Tag != "!!int" {
return 0, false
}
v, err := strconv.ParseInt(node.Value, 10, 64)
if err != nil {
return 0, false
}
return v, true
}
// FloatForScalarNode returns the float value of a node.
func FloatForScalarNode(node *yaml.Node) (float64, bool) {
if node == nil {
return 0.0, false
}
if node.Kind == yaml.DocumentNode {
return FloatForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return 0.0, false
}
if (node.Tag != "!!int") && (node.Tag != "!!float") {
return 0.0, false
}
v, err := strconv.ParseFloat(node.Value, 64)
if err != nil {
return 0.0, false
}
return v, true
}
// StringForScalarNode returns the string value of a node.
func StringForScalarNode(node *yaml.Node) (string, bool) {
if node == nil {
return "", false
}
if node.Kind == yaml.DocumentNode {
return StringForScalarNode(node.Content[0])
}
switch node.Kind {
case yaml.ScalarNode:
switch node.Tag {
case "!!int":
return node.Value, true
case "!!str":
return node.Value, true
case "!!timestamp":
return node.Value, true
case "!!null":
return "", true
default:
return "", false
}
default:
return "", false
}
}
// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible.
func StringArrayForSequenceNode(node *yaml.Node) []string {
stringArray := make([]string, 0)
for _, item := range node.Content {
v, ok := StringForScalarNode(item)
if ok {
stringArray = append(stringArray, v)
}
}
return stringArray
}
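The helpers above rely on the yaml.v3 node layout, where a document node wraps a root node and a mapping node stores its keys at even indices of Content and the corresponding values at odd indices. A small, self-contained sketch of that layout, using gopkg.in/yaml.v3 directly (the helper calls in the comments are only illustrative):
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("enabled: true\ncount: 3\n"), &doc); err != nil {
		panic(err)
	}
	// Unmarshaling into a yaml.Node yields a DocumentNode; its first child
	// is the root mapping node.
	root := doc.Content[0]

	// Keys live at even indices, values at odd indices.
	for i := 0; i < len(root.Content); i += 2 {
		fmt.Printf("%s -> %s (%s)\n", root.Content[i].Value, root.Content[i+1].Value, root.Content[i+1].Tag)
	}

	// With the helpers above this would read roughly as:
	//   enabled, _ := BoolForScalarNode(MapValueForKey(root, "enabled"))
	//   count, _ := IntForScalarNode(MapValueForKey(root, "count"))
}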
// MissingKeysInMap identifies which keys from a list of required keys are not in a map. // MissingKeysInMap identifies which keys from a list of required keys are not in a map.
func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string {
missingKeys := make([]string, 0) missingKeys := make([]string, 0)
for _, k := range requiredKeys { for _, k := range requiredKeys {
if !MapHasKey(m, k) { if !MapHasKey(m, k) {
@@ -95,12 +212,13 @@ func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
} }
// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. // InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
invalidKeys := make([]string, 0) invalidKeys := make([]string, 0)
for _, item := range m { if m == nil || m.Kind != yaml.MappingNode {
itemKey, ok := item.Key.(string) return invalidKeys
if ok { }
key := itemKey for i := 0; i < len(m.Content); i += 2 {
key := m.Content[i].Value
found := false found := false
// does the key match an allowed key? // does the key match an allowed key?
for _, allowedKey := range allowedKeys { for _, allowedKey := range allowedKeys {
@@ -122,37 +240,81 @@ func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*
} }
} }
} }
}
return invalidKeys return invalidKeys
} }
// DescribeMap describes a map (for debugging purposes). // NewNullNode creates a new Null node.
func DescribeMap(in interface{}, indent string) string { func NewNullNode() *yaml.Node {
description := "" node := &yaml.Node{
m, ok := in.(map[string]interface{}) Kind: yaml.ScalarNode,
if ok { Tag: "!!null",
keys := make([]string, 0)
for k := range m {
keys = append(keys, k)
} }
sort.Strings(keys) return node
for _, k := range keys { }
v := m[k]
description += fmt.Sprintf("%s%s:\n", indent, k) // NewMappingNode creates a new Mapping node.
description += DescribeMap(v, indent+" ") func NewMappingNode() *yaml.Node {
return &yaml.Node{
Kind: yaml.MappingNode,
Content: make([]*yaml.Node, 0),
} }
return description }
// NewSequenceNode creates a new Sequence node.
func NewSequenceNode() *yaml.Node {
node := &yaml.Node{
Kind: yaml.SequenceNode,
Content: make([]*yaml.Node, 0),
} }
a, ok := in.([]interface{}) return node
if ok { }
for i, v := range a {
description += fmt.Sprintf("%s%d:\n", indent, i) // NewScalarNodeForString creates a new node to hold a string.
description += DescribeMap(v, indent+" ") func NewScalarNodeForString(s string) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!str",
Value: s,
} }
return description }
// NewSequenceNodeForStringArray creates a new node to hold an array of strings.
func NewSequenceNodeForStringArray(strings []string) *yaml.Node {
node := &yaml.Node{
Kind: yaml.SequenceNode,
Content: make([]*yaml.Node, 0),
}
for _, s := range strings {
node.Content = append(node.Content, NewScalarNodeForString(s))
}
return node
}
// NewScalarNodeForBool creates a new node to hold a bool.
func NewScalarNodeForBool(b bool) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!bool",
Value: fmt.Sprintf("%t", b),
}
}
// NewScalarNodeForFloat creates a new node to hold a float.
func NewScalarNodeForFloat(f float64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!float",
Value: fmt.Sprintf("%g", f),
}
}
// NewScalarNodeForInt creates a new node to hold an integer.
func NewScalarNodeForInt(i int64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!int",
Value: fmt.Sprintf("%d", i),
} }
description += fmt.Sprintf("%s%+v\n", indent, in)
return description
} }
// PluralProperties returns the string "properties" pluralized. // PluralProperties returns the string "properties" pluralized.
@@ -195,3 +357,40 @@ func StringValue(item interface{}) (value string, ok bool) {
} }
return "", false return "", false
} }
// Description returns a human-readable representation of an item.
func Description(item interface{}) string {
value, ok := item.(*yaml.Node)
if ok {
return jsonschema.Render(value)
}
return fmt.Sprintf("%+v", item)
}
// Display returns a description of a node for use in error messages.
func Display(node *yaml.Node) string {
switch node.Kind {
case yaml.ScalarNode:
switch node.Tag {
case "!!str":
return fmt.Sprintf("%s (string)", node.Value)
}
}
return fmt.Sprintf("%+v (%T)", node, node)
}
// Marshal creates a yaml version of a structure in our preferred style
func Marshal(in *yaml.Node) []byte {
clearStyle(in)
//bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}})
bytes, _ := yaml.Marshal(in)
return bytes
}
func clearStyle(node *yaml.Node) {
node.Style = 0
for _, c := range node.Content {
clearStyle(c)
}
}

View File

@@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
package compiler package compiler
import ( import (
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"log" "log"
@@ -23,18 +22,30 @@ import (
"net/url" "net/url"
"path/filepath" "path/filepath"
"strings" "strings"
"sync"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v3"
) )
var fileCache map[string][]byte
var infoCache map[string]interface{}
var count int64
var verboseReader = false var verboseReader = false
var fileCache map[string][]byte
var infoCache map[string]*yaml.Node
var fileCacheEnable = true var fileCacheEnable = true
var infoCacheEnable = true var infoCacheEnable = true
// These locks are used to synchronize accesses to the fileCache and infoCache
// maps (above). They are global state and can throw thread-related errors
// when modified from separate goroutines. The general strategy is to protect
// all public functions in this file with mutex Lock() calls. As a result, to
// avoid deadlock, these public functions should not call other public
// functions, so some public functions have private equivalents.
// In the future, we might consider replacing the maps with sync.Map and
// eliminating these mutexes.
var fileCacheMutex sync.Mutex
var infoCacheMutex sync.Mutex
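A minimal, self-contained sketch of the pattern described in the comment above (not the vendored implementation): exported functions take the mutex and delegate to unexported helpers that assume the lock is already held, so helpers can call each other without deadlocking.
package main

import (
	"fmt"
	"sync"
)

var cacheMutex sync.Mutex
var cache = map[string][]byte{}

// Put is a public entry point: it takes the lock and delegates.
func Put(key string, value []byte) {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()
	put(key, value)
}

// put assumes cacheMutex is already held by the caller.
func put(key string, value []byte) {
	cache[key] = value
}

// Get follows the same pattern.
func Get(key string) ([]byte, bool) {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()
	v, ok := cache[key]
	return v, ok
}

func main() {
	Put("a", []byte("b"))
	v, _ := Get("a")
	fmt.Println(string(v))
}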
func initializeFileCache() { func initializeFileCache() {
if fileCache == nil { if fileCache == nil {
fileCache = make(map[string][]byte, 0) fileCache = make(map[string][]byte, 0)
@@ -43,27 +54,42 @@ func initializeFileCache() {
func initializeInfoCache() { func initializeInfoCache() {
if infoCache == nil { if infoCache == nil {
infoCache = make(map[string]interface{}, 0) infoCache = make(map[string]*yaml.Node, 0)
} }
} }
// EnableFileCache turns on file caching.
func EnableFileCache() { func EnableFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCacheEnable = true fileCacheEnable = true
} }
// EnableInfoCache turns on parsed info caching.
func EnableInfoCache() { func EnableInfoCache() {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCacheEnable = true infoCacheEnable = true
} }
// DisableFileCache turns off file caching.
func DisableFileCache() { func DisableFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCacheEnable = false fileCacheEnable = false
} }
// DisableInfoCache turns off parsed info caching.
func DisableInfoCache() { func DisableInfoCache() {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCacheEnable = false infoCacheEnable = false
} }
// RemoveFromFileCache removes an entry from the file cache.
func RemoveFromFileCache(fileurl string) { func RemoveFromFileCache(fileurl string) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
if !fileCacheEnable { if !fileCacheEnable {
return return
} }
@@ -71,7 +97,10 @@ func RemoveFromFileCache(fileurl string) {
delete(fileCache, fileurl) delete(fileCache, fileurl)
} }
// RemoveFromInfoCache removes an entry from the info cache.
func RemoveFromInfoCache(filename string) { func RemoveFromInfoCache(filename string) {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
if !infoCacheEnable { if !infoCacheEnable {
return return
} }
@@ -79,21 +108,31 @@ func RemoveFromInfoCache(filename string) {
delete(infoCache, filename) delete(infoCache, filename)
} }
func GetInfoCache() map[string]interface{} { // GetInfoCache returns the info cache map.
func GetInfoCache() map[string]*yaml.Node {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
if infoCache == nil { if infoCache == nil {
initializeInfoCache() initializeInfoCache()
} }
return infoCache return infoCache
} }
// ClearFileCache clears the file cache.
func ClearFileCache() { func ClearFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCache = make(map[string][]byte, 0) fileCache = make(map[string][]byte, 0)
} }
// ClearInfoCache clears the info cache.
func ClearInfoCache() { func ClearInfoCache() {
infoCache = make(map[string]interface{}) infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCache = make(map[string]*yaml.Node)
} }
// ClearCaches clears all caches.
func ClearCaches() { func ClearCaches() {
ClearFileCache() ClearFileCache()
ClearInfoCache() ClearInfoCache()
@@ -101,6 +140,12 @@ func ClearCaches() {
// FetchFile gets a specified file from the local filesystem or a remote location. // FetchFile gets a specified file from the local filesystem or a remote location.
func FetchFile(fileurl string) ([]byte, error) { func FetchFile(fileurl string) ([]byte, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
return fetchFile(fileurl)
}
func fetchFile(fileurl string) ([]byte, error) {
var bytes []byte var bytes []byte
initializeFileCache() initializeFileCache()
if fileCacheEnable { if fileCacheEnable {
@@ -121,7 +166,7 @@ func FetchFile(fileurl string) ([]byte, error) {
} }
defer response.Body.Close() defer response.Body.Close()
if response.StatusCode != 200 { if response.StatusCode != 200 {
return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status)
} }
bytes, err = ioutil.ReadAll(response.Body) bytes, err = ioutil.ReadAll(response.Body)
if fileCacheEnable && err == nil { if fileCacheEnable && err == nil {
@@ -132,11 +177,17 @@ func FetchFile(fileurl string) ([]byte, error) {
// ReadBytesForFile reads the bytes of a file. // ReadBytesForFile reads the bytes of a file.
func ReadBytesForFile(filename string) ([]byte, error) { func ReadBytesForFile(filename string) ([]byte, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
return readBytesForFile(filename)
}
func readBytesForFile(filename string) ([]byte, error) {
// is the filename a url? // is the filename a url?
fileurl, _ := url.Parse(filename) fileurl, _ := url.Parse(filename)
if fileurl.Scheme != "" { if fileurl.Scheme != "" {
// yes, fetch it // yes, fetch it
bytes, err := FetchFile(filename) bytes, err := fetchFile(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -150,8 +201,14 @@ func ReadBytesForFile(filename string) ([]byte, error) {
return bytes, nil return bytes, nil
} }
// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. // ReadInfoFromBytes unmarshals a file as a *yaml.Node.
func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
return readInfoFromBytes(filename, bytes)
}
func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
initializeInfoCache() initializeInfoCache()
if infoCacheEnable { if infoCacheEnable {
cachedInfo, ok := infoCache[filename] cachedInfo, ok := infoCache[filename]
@@ -165,19 +222,23 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
log.Printf("Reading info for file %s", filename) log.Printf("Reading info for file %s", filename)
} }
} }
var info yaml.MapSlice var info yaml.Node
err := yaml.Unmarshal(bytes, &info) err := yaml.Unmarshal(bytes, &info)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if infoCacheEnable && len(filename) > 0 { if infoCacheEnable && len(filename) > 0 {
infoCache[filename] = info infoCache[filename] = &info
} }
return info, nil return &info, nil
} }
// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. // ReadInfoForRef reads a file and return the fragment needed to resolve a $ref.
func ReadInfoForRef(basefile string, ref string) (interface{}, error) { func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
initializeInfoCache() initializeInfoCache()
if infoCacheEnable { if infoCacheEnable {
info, ok := infoCache[ref] info, ok := infoCache[ref]
@@ -191,7 +252,6 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
log.Printf("Reading info for ref %s#%s", basefile, ref) log.Printf("Reading info for ref %s#%s", basefile, ref)
} }
} }
count = count + 1
basedir, _ := filepath.Split(basefile) basedir, _ := filepath.Split(basefile)
parts := strings.Split(ref, "#") parts := strings.Split(ref, "#")
var filename string var filename string
@@ -204,24 +264,30 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
} else { } else {
filename = basefile filename = basefile
} }
bytes, err := ReadBytesForFile(filename) bytes, err := readBytesForFile(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }
info, err := ReadInfoFromBytes(filename, bytes) info, err := readInfoFromBytes(filename, bytes)
if info != nil && info.Kind == yaml.DocumentNode {
info = info.Content[0]
}
if err != nil { if err != nil {
log.Printf("File error: %v\n", err) log.Printf("File error: %v\n", err)
} else { } else {
if info == nil {
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
}
if len(parts) > 1 { if len(parts) > 1 {
path := strings.Split(parts[1], "/") path := strings.Split(parts[1], "/")
for i, key := range path { for i, key := range path {
if i > 0 { if i > 0 {
m, ok := info.(yaml.MapSlice) m := info
if ok { if true {
found := false found := false
for _, section := range m { for i := 0; i < len(m.Content); i += 2 {
if section.Key == key { if m.Content[i].Value == key {
info = section.Value info = m.Content[i+1]
found = true found = true
} }
} }

View File

@@ -1,5 +1,13 @@
# Extensions # Extensions
This directory contains support code for building Gnostic extensions and associated examples. **Extension Support is experimental.**
Extensions are used to compile vendor or specification extensions into protocol buffer structures. This directory contains support code for building Gnostic extension handlers and
associated examples.
Extension handlers can be used to compile vendor or specification extensions
into protocol buffer structures.
Like plugins, extension handlers are built as separate executables. Extension
bodies are written to extension handlers as serialized
ExtensionHandlerRequests.
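A minimal sketch of what such an extension handler executable might look like, assuming the generated types are imported from github.com/googleapis/gnostic/extensions; a real handler would inspect the wrapped YAML and return a serialized message in the Value field:
package main

import (
	"io/ioutil"
	"os"

	"github.com/golang/protobuf/proto"
	extensions "github.com/googleapis/gnostic/extensions"
)

func main() {
	// The compiler writes a serialized ExtensionHandlerRequest to stdin.
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		os.Exit(1)
	}
	request := &extensions.ExtensionHandlerRequest{}
	if err := proto.Unmarshal(data, request); err != nil {
		os.Exit(1)
	}

	// Report the extension as unhandled; a real handler would switch on
	// request.Wrapper.ExtensionName and parse request.Wrapper.Yaml.
	response := &extensions.ExtensionHandlerResponse{Handled: false}
	out, err := proto.Marshal(response)
	if err != nil {
		os.Exit(1)
	}
	// The handler writes the serialized response to stdout.
	os.Stdout.Write(out)
}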

View File

@@ -1,148 +1,186 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.24.0
// protoc v3.12.0
// source: extensions/extension.proto // source: extensions/extension.proto
package openapiextension_v1 package gnostic_extension_v1
import ( import (
fmt "fmt"
proto "github.com/golang/protobuf/proto" proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any" any "github.com/golang/protobuf/ptypes/any"
math "math" protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
) )
// Reference imports to suppress errors if they are not otherwise used. const (
var _ = proto.Marshal // Verify that this generated code is sufficiently up-to-date.
var _ = fmt.Errorf _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
var _ = math.Inf // Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion to ensure that this generated file // This is a compile-time assertion that a sufficiently up-to-date version
// is compatible with the proto package it is being compiled against. // of the legacy proto package is being used.
// A compilation error at this line likely means your copy of the const _ = proto.ProtoPackageIsVersion4
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// The version number of OpenAPI compiler. // The version number of Gnostic.
type Version struct { type Version struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases. // be empty for mainline stable releases.
Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *Version) Reset() { *m = Version{} } func (x *Version) Reset() {
func (m *Version) String() string { return proto.CompactTextString(m) } *x = Version{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Version) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Version) ProtoMessage() {} func (*Version) ProtoMessage() {}
func (x *Version) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Version.ProtoReflect.Descriptor instead.
func (*Version) Descriptor() ([]byte, []int) { func (*Version) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{0} return file_extensions_extension_proto_rawDescGZIP(), []int{0}
} }
func (m *Version) XXX_Unmarshal(b []byte) error { func (x *Version) GetMajor() int32 {
return xxx_messageInfo_Version.Unmarshal(m, b) if x != nil {
} return x.Major
func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
}
func (m *Version) XXX_Merge(src proto.Message) {
xxx_messageInfo_Version.Merge(m, src)
}
func (m *Version) XXX_Size() int {
return xxx_messageInfo_Version.Size(m)
}
func (m *Version) XXX_DiscardUnknown() {
xxx_messageInfo_Version.DiscardUnknown(m)
}
var xxx_messageInfo_Version proto.InternalMessageInfo
func (m *Version) GetMajor() int32 {
if m != nil {
return m.Major
} }
return 0 return 0
} }
func (m *Version) GetMinor() int32 { func (x *Version) GetMinor() int32 {
if m != nil { if x != nil {
return m.Minor return x.Minor
} }
return 0 return 0
} }
func (m *Version) GetPatch() int32 { func (x *Version) GetPatch() int32 {
if m != nil { if x != nil {
return m.Patch return x.Patch
} }
return 0 return 0
} }
func (m *Version) GetSuffix() string { func (x *Version) GetSuffix() string {
if m != nil { if x != nil {
return m.Suffix return x.Suffix
} }
return "" return ""
} }
// An encoded Request is written to the ExtensionHandler's stdin. // An encoded Request is written to the ExtensionHandler's stdin.
type ExtensionHandlerRequest struct { type ExtensionHandlerRequest struct {
// The OpenAPI descriptions that were explicitly listed on the command line. state protoimpl.MessageState
// The specifications will appear in the order they are specified to gnostic. sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The extension to process.
Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"`
// The version number of openapi compiler. // The version number of Gnostic.
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` }
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` func (x *ExtensionHandlerRequest) Reset() {
*x = ExtensionHandlerRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExtensionHandlerRequest) String() string {
return protoimpl.X.MessageStringOf(x)
} }
func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} }
func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
func (*ExtensionHandlerRequest) ProtoMessage() {} func (*ExtensionHandlerRequest) ProtoMessage() {}
func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead.
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{1} return file_extensions_extension_proto_rawDescGZIP(), []int{1}
} }
func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error { func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper {
return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b) if x != nil {
} return x.Wrapper
func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic)
}
func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src)
}
func (m *ExtensionHandlerRequest) XXX_Size() int {
return xxx_messageInfo_ExtensionHandlerRequest.Size(m)
}
func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo
func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
if m != nil {
return m.Wrapper
} }
return nil return nil
} }
func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version {
if m != nil { if x != nil {
return m.CompilerVersion return x.CompilerVersion
} }
return nil return nil
} }
// The extensions writes an encoded ExtensionHandlerResponse to stdout. // The extensions writes an encoded ExtensionHandlerResponse to stdout.
type ExtensionHandlerResponse struct { type ExtensionHandlerResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// true if the extension is handled by the extension handler; false otherwise // true if the extension is handled by the extension handler; false otherwise
Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"`
// Error message. If non-empty, the extension handling failed. // Error message(s). If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero // The extension handler process should exit with status code zero
// even if it reports an error in this way. // even if it reports an error in this way.
// //
@@ -151,150 +189,277 @@ type ExtensionHandlerResponse struct {
// itself -- such as the input Document being unparseable -- should be // itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero // reported by writing a message to stderr and exiting with a non-zero
// status code. // status code.
Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"` Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"`
// text output // text output
Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } func (x *ExtensionHandlerResponse) Reset() {
func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } *x = ExtensionHandlerResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExtensionHandlerResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExtensionHandlerResponse) ProtoMessage() {} func (*ExtensionHandlerResponse) ProtoMessage() {}
func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead.
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{2} return file_extensions_extension_proto_rawDescGZIP(), []int{2}
} }
func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error { func (x *ExtensionHandlerResponse) GetHandled() bool {
return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b) if x != nil {
} return x.Handled
func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic)
}
func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src)
}
func (m *ExtensionHandlerResponse) XXX_Size() int {
return xxx_messageInfo_ExtensionHandlerResponse.Size(m)
}
func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo
func (m *ExtensionHandlerResponse) GetHandled() bool {
if m != nil {
return m.Handled
} }
return false return false
} }
func (m *ExtensionHandlerResponse) GetError() []string { func (x *ExtensionHandlerResponse) GetErrors() []string {
if m != nil { if x != nil {
return m.Error return x.Errors
} }
return nil return nil
} }
func (m *ExtensionHandlerResponse) GetValue() *any.Any { func (x *ExtensionHandlerResponse) GetValue() *any.Any {
if m != nil { if x != nil {
return m.Value return x.Value
} }
return nil return nil
} }
type Wrapper struct { type Wrapper struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// version of the OpenAPI specification in which this extension was written. // version of the OpenAPI specification in which this extension was written.
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
// Name of the extension // Name of the extension.
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"`
// Must be a valid yaml for the proto // YAML-formatted extension value.
Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *Wrapper) Reset() { *m = Wrapper{} } func (x *Wrapper) Reset() {
func (m *Wrapper) String() string { return proto.CompactTextString(m) } *x = Wrapper{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Wrapper) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Wrapper) ProtoMessage() {} func (*Wrapper) ProtoMessage() {}
func (x *Wrapper) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead.
func (*Wrapper) Descriptor() ([]byte, []int) { func (*Wrapper) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{3} return file_extensions_extension_proto_rawDescGZIP(), []int{3}
} }
func (m *Wrapper) XXX_Unmarshal(b []byte) error { func (x *Wrapper) GetVersion() string {
return xxx_messageInfo_Wrapper.Unmarshal(m, b) if x != nil {
} return x.Version
func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic)
}
func (m *Wrapper) XXX_Merge(src proto.Message) {
xxx_messageInfo_Wrapper.Merge(m, src)
}
func (m *Wrapper) XXX_Size() int {
return xxx_messageInfo_Wrapper.Size(m)
}
func (m *Wrapper) XXX_DiscardUnknown() {
xxx_messageInfo_Wrapper.DiscardUnknown(m)
}
var xxx_messageInfo_Wrapper proto.InternalMessageInfo
func (m *Wrapper) GetVersion() string {
if m != nil {
return m.Version
} }
return "" return ""
} }
func (m *Wrapper) GetExtensionName() string { func (x *Wrapper) GetExtensionName() string {
if m != nil { if x != nil {
return m.ExtensionName return x.ExtensionName
} }
return "" return ""
} }
func (m *Wrapper) GetYaml() string { func (x *Wrapper) GetYaml() string {
if m != nil { if x != nil {
return m.Yaml return x.Yaml
} }
return "" return ""
} }
func init() { var File_extensions_extension_proto protoreflect.FileDescriptor
proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") var file_extensions_extension_proto_rawDesc = []byte{
proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74,
proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e,
0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e,
0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a,
0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f,
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14,
0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d,
0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75,
0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66,
0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37,
0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07,
0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65,
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61,
0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a,
0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12,
0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57,
0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4b, 0x0a, 0x0e, 0x6f,
0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47,
0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50,
0x01, 0x5a, 0x1f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x67, 0x6e,
0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f,
0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
file_extensions_extension_proto_rawDescOnce sync.Once
file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc
)
func file_extensions_extension_proto_rawDescGZIP() []byte {
file_extensions_extension_proto_rawDescOnce.Do(func() {
file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData)
})
return file_extensions_extension_proto_rawDescData
}
var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_extensions_extension_proto_goTypes = []interface{}{
(*Version)(nil), // 0: gnostic.extension.v1.Version
(*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
(*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper
(*any.Any)(nil), // 4: google.protobuf.Any
}
var file_extensions_extension_proto_depIdxs = []int32{
3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper
0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version
4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}

}
func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) }
var fileDescriptor_661e47e790f76671 = []byte{
// 362 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40,
0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50,
0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7,
0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85,
0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f,
0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71,
0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e,
0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d,
0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef,
0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35,
0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14,
0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39,
0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08,
0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81,
0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3,
0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8,
0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35,
0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0,
0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18,
0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09,
0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d,
0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00,
0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00,
}
func init() { file_extensions_extension_proto_init() }
func file_extensions_extension_proto_init() {
if File_extensions_extension_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Version); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionHandlerRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionHandlerResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Wrapper); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_extensions_extension_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_extensions_extension_proto_goTypes,
DependencyIndexes: file_extensions_extension_proto_depIdxs,
MessageInfos: file_extensions_extension_proto_msgTypes,
}.Build()
File_extensions_extension_proto = out.File
file_extensions_extension_proto_rawDesc = nil
file_extensions_extension_proto_goTypes = nil
file_extensions_extension_proto_depIdxs = nil
} }


@@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -14,8 +14,9 @@
syntax = "proto3"; syntax = "proto3";
package gnostic.extension.v1;
import "google/protobuf/any.proto"; import "google/protobuf/any.proto";
package openapiextension.v1;
// This option lets the proto compiler generate Java code inside the package // This option lets the proto compiler generate Java code inside the package
// name (see below) instead of inside an outer class. It creates a simpler // name (see below) instead of inside an outer class. It creates a simpler
@@ -26,7 +27,7 @@ option java_multiple_files = true;
// The Java outer classname should be the filename in UpperCamelCase. This // The Java outer classname should be the filename in UpperCamelCase. This
// class is only used to hold proto descriptor, so developers don't need to // class is only used to hold proto descriptor, so developers don't need to
// work with it directly. // work with it directly.
option java_outer_classname = "OpenAPIExtensionV1"; option java_outer_classname = "GnosticExtension";
// The Java package name must be proto package name with proper prefix. // The Java package name must be proto package name with proper prefix.
option java_package = "org.gnostic.v1"; option java_package = "org.gnostic.v1";
@@ -37,9 +38,12 @@ option java_package = "org.gnostic.v1";
// hopefully unique enough to not conflict with things that may come along in // hopefully unique enough to not conflict with things that may come along in
// the future. 'GPB' is reserved for the protocol buffer implementation itself. // the future. 'GPB' is reserved for the protocol buffer implementation itself.
// //
option objc_class_prefix = "OAE"; // "OpenAPI Extension" option objc_class_prefix = "GNX"; // "Gnostic Extension"
// The version number of OpenAPI compiler. // The Go package name.
option go_package = "extensions;gnostic_extension_v1";
// The version number of Gnostic.
message Version { message Version {
int32 major = 1; int32 major = 1;
int32 minor = 2; int32 minor = 2;
@@ -52,12 +56,11 @@ message Version {
// An encoded Request is written to the ExtensionHandler's stdin. // An encoded Request is written to the ExtensionHandler's stdin.
message ExtensionHandlerRequest { message ExtensionHandlerRequest {
// The OpenAPI descriptions that were explicitly listed on the command line. // The extension to process.
// The specifications will appear in the order they are specified to gnostic.
Wrapper wrapper = 1; Wrapper wrapper = 1;
// The version number of openapi compiler. // The version number of Gnostic.
Version compiler_version = 3; Version compiler_version = 2;
} }
// The extension writes an encoded ExtensionHandlerResponse to stdout. // The extension writes an encoded ExtensionHandlerResponse to stdout.
@@ -66,7 +69,7 @@ message ExtensionHandlerResponse {
// true if the extension is handled by the extension handler; false otherwise // true if the extension is handled by the extension handler; false otherwise
bool handled = 1; bool handled = 1;
// Error message. If non-empty, the extension handling failed. // Error message(s). If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero // The extension handler process should exit with status code zero
// even if it reports an error in this way. // even if it reports an error in this way.
// //
@@ -75,7 +78,7 @@ message ExtensionHandlerResponse {
// itself -- such as the input Document being unparseable -- should be // itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero // reported by writing a message to stderr and exiting with a non-zero
// status code. // status code.
repeated string error = 2; repeated string errors = 2;
// text output // text output
google.protobuf.Any value = 3; google.protobuf.Any value = 3;
@@ -85,9 +88,9 @@ message Wrapper {
// version of the OpenAPI specification in which this extension was written. // version of the OpenAPI specification in which this extension was written.
string version = 1; string version = 1;
// Name of the extension // Name of the extension.
string extension_name = 2; string extension_name = 2;
// Must be a valid yaml for the proto // YAML-formatted extension value.
string yaml = 3; string yaml = 3;
} }


@@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -12,71 +12,53 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package openapiextension_v1 package gnostic_extension_v1
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"log"
"os" "os"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes"
) )
type documentHandler func(version string, extensionName string, document string)
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
func forInputYamlFromOpenapic(handler documentHandler) { // Main implements the main program of an extension handler.
func Main(handler extensionHandler) {
// unpack the request
data, err := ioutil.ReadAll(os.Stdin) data, err := ioutil.ReadAll(os.Stdin)
if err != nil { if err != nil {
fmt.Println("File error:", err.Error()) log.Println("File error:", err.Error())
os.Exit(1) os.Exit(1)
} }
if len(data) == 0 { if len(data) == 0 {
fmt.Println("No input data.") log.Println("No input data.")
os.Exit(1) os.Exit(1)
} }
request := &ExtensionHandlerRequest{} request := &ExtensionHandlerRequest{}
err = proto.Unmarshal(data, request) err = proto.Unmarshal(data, request)
if err != nil { if err != nil {
fmt.Println("Input error:", err.Error()) log.Println("Input error:", err.Error())
os.Exit(1) os.Exit(1)
} }
handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) // call the handler
} handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml)
// respond with the output of the handler
// ProcessExtension calls the handler for a specified extension. response := &ExtensionHandlerResponse{
func ProcessExtension(handleExtension extensionHandler) { Handled: false, // default assumption
response := &ExtensionHandlerResponse{} Errors: make([]string, 0),
forInputYamlFromOpenapic(
func(version string, extensionName string, yamlInput string) {
var newObject proto.Message
var err error
handled, newObject, err := handleExtension(extensionName, yamlInput)
if !handled {
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
} }
if err != nil {
// If we reach here, then the extension is handled response.Errors = append(response.Errors, err.Error())
} else if handled {
response.Handled = true response.Handled = true
response.Value, err = ptypes.MarshalAny(output)
if err != nil { if err != nil {
response.Error = append(response.Error, err.Error()) response.Errors = append(response.Errors, err.Error())
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
} }
response.Value, err = ptypes.MarshalAny(newObject)
if err != nil {
response.Error = append(response.Error, err.Error())
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
} }
})
responseBytes, _ := proto.Marshal(response) responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes) os.Stdout.Write(responseBytes)
} }
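For orientation, the Main helper above is what a standalone gnostic extension binary calls from its own main function: Main reads an ExtensionHandlerRequest from stdin, invokes the handler, and writes an ExtensionHandlerResponse to stdout. The sketch below is illustrative only; the import path is assumed from the vendored module layout, and the "x-sample" extension name and StringValue payload are placeholders, not part of this commit.

package main

import (
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"

	gnostic_extension_v1 "github.com/googleapis/gnostic/extensions" // assumed vendored import path
)

func main() {
	gnostic_extension_v1.Main(func(name string, yamlInput string) (bool, proto.Message, error) {
		if name != "x-sample" { // hypothetical extension name
			return false, nil, nil // not handled by this binary
		}
		// Return any proto message; Main wraps it in a google.protobuf.Any for the response.
		return true, &wrappers.StringValue{Value: yamlInput}, nil
	})
}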


@@ -0,0 +1,4 @@
# jsonschema
This directory contains code for reading, writing, and manipulating JSON
schemas.
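
As a rough usage sketch (the import path and the schema file name are assumptions for illustration; the functions are the ones defined in this package):

package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/jsonschema" // assumed vendored import path
)

func main() {
	// Read a draft-04 JSON schema from disk (file name is a placeholder).
	schema, err := jsonschema.NewSchemaFromFile("schema.json")
	if err != nil {
		log.Fatal(err)
	}
	// Normalize the schema before further processing.
	schema.ResolveRefs()
	schema.ResolveAllOfs()
	// Print an indented, human-readable description of the schema.
	fmt.Print(schema.String())
}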


@@ -0,0 +1,84 @@
// THIS FILE IS AUTOMATICALLY GENERATED.
package jsonschema
import (
"encoding/base64"
)
func baseSchemaBytes() ([]byte, error){
return base64.StdEncoding.DecodeString(
`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi
JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl
c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK
ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg
ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg
ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp
bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp
dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl
ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK
ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v
bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg
ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki
LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p
bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s
CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog
ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg
ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog
ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg
ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog
ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6
IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog
ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1
ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl
ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw
ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg
ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg
ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg
ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg
IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0
aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg
ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg
ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg
ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK
ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi
ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP
ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy
ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg
ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv
ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi
OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl
SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs
dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k
ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk
cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl
cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh
ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg
ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg
ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk
ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk
ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6
IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi
b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9
LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl
cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv
bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog
ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq
ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg
ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg
ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg
ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg
ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu
aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh
bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl
cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs
CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs
ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg
ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg
ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy
YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}


@@ -0,0 +1,229 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"strings"
)
//
// DISPLAY
// The following methods display Schemas.
//
// Description returns a string representation of a string or string array.
func (s *StringOrStringArray) Description() string {
if s.String != nil {
return *s.String
}
if s.StringArray != nil {
return strings.Join(*s.StringArray, ", ")
}
return ""
}
// Returns a string representation of a Schema.
func (schema *Schema) String() string {
return schema.describeSchema("")
}
// Helper: Returns a string representation of a Schema indented by a specified string.
func (schema *Schema) describeSchema(indent string) string {
result := ""
if schema.Schema != nil {
result += indent + "$schema: " + *(schema.Schema) + "\n"
}
if schema.ID != nil {
result += indent + "id: " + *(schema.ID) + "\n"
}
if schema.MultipleOf != nil {
result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf))
}
if schema.Maximum != nil {
result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum))
}
if schema.ExclusiveMaximum != nil {
result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum))
}
if schema.Minimum != nil {
result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum))
}
if schema.ExclusiveMinimum != nil {
result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum))
}
if schema.MaxLength != nil {
result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength))
}
if schema.MinLength != nil {
result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength))
}
if schema.Pattern != nil {
result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern))
}
if schema.AdditionalItems != nil {
s := schema.AdditionalItems.Schema
if s != nil {
result += indent + "additionalItems:\n"
result += s.describeSchema(indent + " ")
} else {
b := *(schema.AdditionalItems.Boolean)
result += indent + fmt.Sprintf("additionalItems: %+v\n", b)
}
}
if schema.Items != nil {
result += indent + "items:\n"
items := schema.Items
if items.SchemaArray != nil {
for i, s := range *(items.SchemaArray) {
result += indent + " " + fmt.Sprintf("%d", i) + ":\n"
result += s.describeSchema(indent + " " + " ")
}
} else if items.Schema != nil {
result += items.Schema.describeSchema(indent + " " + " ")
}
}
if schema.MaxItems != nil {
result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems))
}
if schema.MinItems != nil {
result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems))
}
if schema.UniqueItems != nil {
result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems))
}
if schema.MaxProperties != nil {
result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties))
}
if schema.MinProperties != nil {
result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties))
}
if schema.Required != nil {
result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required))
}
if schema.AdditionalProperties != nil {
s := schema.AdditionalProperties.Schema
if s != nil {
result += indent + "additionalProperties:\n"
result += s.describeSchema(indent + " ")
} else {
b := *(schema.AdditionalProperties.Boolean)
result += indent + fmt.Sprintf("additionalProperties: %+v\n", b)
}
}
if schema.Properties != nil {
result += indent + "properties:\n"
for _, pair := range *(schema.Properties) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.PatternProperties != nil {
result += indent + "patternProperties:\n"
for _, pair := range *(schema.PatternProperties) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.Dependencies != nil {
result += indent + "dependencies:\n"
for _, pair := range *(schema.Dependencies) {
name := pair.Name
schemaOrStringArray := pair.Value
s := schemaOrStringArray.Schema
if s != nil {
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
} else {
a := schemaOrStringArray.StringArray
if a != nil {
result += indent + " " + name + ":\n"
for _, s2 := range *a {
result += indent + " " + " " + s2 + "\n"
}
}
}
}
}
if schema.Enumeration != nil {
result += indent + "enumeration:\n"
for _, value := range *(schema.Enumeration) {
if value.String != nil {
result += indent + " " + fmt.Sprintf("%+v\n", *value.String)
} else {
result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool)
}
}
}
if schema.Type != nil {
result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description())
}
if schema.AllOf != nil {
result += indent + "allOf:\n"
for _, s := range *(schema.AllOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.AnyOf != nil {
result += indent + "anyOf:\n"
for _, s := range *(schema.AnyOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.OneOf != nil {
result += indent + "oneOf:\n"
for _, s := range *(schema.OneOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.Not != nil {
result += indent + "not:\n"
result += schema.Not.describeSchema(indent + " ")
}
if schema.Definitions != nil {
result += indent + "definitions:\n"
for _, pair := range *(schema.Definitions) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.Title != nil {
result += indent + "title: " + *(schema.Title) + "\n"
}
if schema.Description != nil {
result += indent + "description: " + *(schema.Description) + "\n"
}
if schema.Default != nil {
result += indent + "default:\n"
result += indent + fmt.Sprintf(" %+v\n", *(schema.Default))
}
if schema.Format != nil {
result += indent + "format: " + *(schema.Format) + "\n"
}
if schema.Ref != nil {
result += indent + "$ref: " + *(schema.Ref) + "\n"
}
return result
}


@@ -0,0 +1,228 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package jsonschema supports the reading, writing, and manipulation
// of JSON Schemas.
package jsonschema
import "gopkg.in/yaml.v3"
// The Schema struct models a JSON Schema and, because schemas are
// defined hierarchically, contains many references to itself.
// All fields are pointers and are nil if the associated values
// are not specified.
type Schema struct {
Schema *string // $schema
ID *string // id keyword used for $ref resolution scope
Ref *string // $ref, i.e. JSON Pointers
// http://json-schema.org/latest/json-schema-validation.html
// 5.1. Validation keywords for numeric instances (number and integer)
MultipleOf *SchemaNumber
Maximum *SchemaNumber
ExclusiveMaximum *bool
Minimum *SchemaNumber
ExclusiveMinimum *bool
// 5.2. Validation keywords for strings
MaxLength *int64
MinLength *int64
Pattern *string
// 5.3. Validation keywords for arrays
AdditionalItems *SchemaOrBoolean
Items *SchemaOrSchemaArray
MaxItems *int64
MinItems *int64
UniqueItems *bool
// 5.4. Validation keywords for objects
MaxProperties *int64
MinProperties *int64
Required *[]string
AdditionalProperties *SchemaOrBoolean
Properties *[]*NamedSchema
PatternProperties *[]*NamedSchema
Dependencies *[]*NamedSchemaOrStringArray
// 5.5. Validation keywords for any instance type
Enumeration *[]SchemaEnumValue
Type *StringOrStringArray
AllOf *[]*Schema
AnyOf *[]*Schema
OneOf *[]*Schema
Not *Schema
Definitions *[]*NamedSchema
// 6. Metadata keywords
Title *string
Description *string
Default *yaml.Node
// 7. Semantic validation with "format"
Format *string
}
// These helper structs represent "combination" types that generally can
// have values of one type or another. All are used to represent parts
// of Schemas.
// SchemaNumber represents a value that can be either an Integer or a Float.
type SchemaNumber struct {
Integer *int64
Float *float64
}
// NewSchemaNumberWithInteger creates and returns a new object
func NewSchemaNumberWithInteger(i int64) *SchemaNumber {
result := &SchemaNumber{}
result.Integer = &i
return result
}
// NewSchemaNumberWithFloat creates and returns a new object
func NewSchemaNumberWithFloat(f float64) *SchemaNumber {
result := &SchemaNumber{}
result.Float = &f
return result
}
// SchemaOrBoolean represents a value that can be either a Schema or a Boolean.
type SchemaOrBoolean struct {
Schema *Schema
Boolean *bool
}
// NewSchemaOrBooleanWithSchema creates and returns a new object
func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean {
result := &SchemaOrBoolean{}
result.Schema = s
return result
}
// NewSchemaOrBooleanWithBoolean creates and returns a new object
func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean {
result := &SchemaOrBoolean{}
result.Boolean = &b
return result
}
// StringOrStringArray represents a value that can be either
// a String or an Array of Strings.
type StringOrStringArray struct {
String *string
StringArray *[]string
}
// NewStringOrStringArrayWithString creates and returns a new object
func NewStringOrStringArrayWithString(s string) *StringOrStringArray {
result := &StringOrStringArray{}
result.String = &s
return result
}
// NewStringOrStringArrayWithStringArray creates and returns a new object
func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray {
result := &StringOrStringArray{}
result.StringArray = &a
return result
}
// SchemaOrStringArray represents a value that can be either
// a Schema or an Array of Strings.
type SchemaOrStringArray struct {
Schema *Schema
StringArray *[]string
}
// SchemaOrSchemaArray represents a value that can be either
// a Schema or an Array of Schemas.
type SchemaOrSchemaArray struct {
Schema *Schema
SchemaArray *[]*Schema
}
// NewSchemaOrSchemaArrayWithSchema creates and returns a new object
func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray {
result := &SchemaOrSchemaArray{}
result.Schema = s
return result
}
// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object
func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray {
result := &SchemaOrSchemaArray{}
result.SchemaArray = &a
return result
}
// SchemaEnumValue represents a value that can be part of an
// enumeration in a Schema.
type SchemaEnumValue struct {
String *string
Bool *bool
}
// NamedSchema is a name-value pair that is used to emulate maps
// with ordered keys.
type NamedSchema struct {
Name string
Value *Schema
}
// NewNamedSchema creates and returns a new object
func NewNamedSchema(name string, value *Schema) *NamedSchema {
return &NamedSchema{Name: name, Value: value}
}
// NamedSchemaOrStringArray is a name-value pair that is used
// to emulate maps with ordered keys.
type NamedSchemaOrStringArray struct {
Name string
Value *SchemaOrStringArray
}
// Access named subschemas by name
func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema {
if array == nil {
return nil
}
for _, pair := range *array {
if pair.Name == name {
return pair.Value
}
}
return nil
}
// PropertyWithName returns the selected element.
func (s *Schema) PropertyWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.Properties, name)
}
// PatternPropertyWithName returns the selected element.
func (s *Schema) PatternPropertyWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.PatternProperties, name)
}
// DefinitionWithName returns the selected element.
func (s *Schema) DefinitionWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.Definitions, name)
}
// AddProperty adds a named property.
func (s *Schema) AddProperty(name string, property *Schema) {
*s.Properties = append(*s.Properties, NewNamedSchema(name, property))
}


@@ -0,0 +1,394 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"log"
"strings"
)
//
// OPERATIONS
// The following methods perform operations on Schemas.
//
// IsEmpty returns true if no members of the Schema are specified.
func (schema *Schema) IsEmpty() bool {
return (schema.Schema == nil) &&
(schema.ID == nil) &&
(schema.MultipleOf == nil) &&
(schema.Maximum == nil) &&
(schema.ExclusiveMaximum == nil) &&
(schema.Minimum == nil) &&
(schema.ExclusiveMinimum == nil) &&
(schema.MaxLength == nil) &&
(schema.MinLength == nil) &&
(schema.Pattern == nil) &&
(schema.AdditionalItems == nil) &&
(schema.Items == nil) &&
(schema.MaxItems == nil) &&
(schema.MinItems == nil) &&
(schema.UniqueItems == nil) &&
(schema.MaxProperties == nil) &&
(schema.MinProperties == nil) &&
(schema.Required == nil) &&
(schema.AdditionalProperties == nil) &&
(schema.Properties == nil) &&
(schema.PatternProperties == nil) &&
(schema.Dependencies == nil) &&
(schema.Enumeration == nil) &&
(schema.Type == nil) &&
(schema.AllOf == nil) &&
(schema.AnyOf == nil) &&
(schema.OneOf == nil) &&
(schema.Not == nil) &&
(schema.Definitions == nil) &&
(schema.Title == nil) &&
(schema.Description == nil) &&
(schema.Default == nil) &&
(schema.Format == nil) &&
(schema.Ref == nil)
}
// IsEqual returns true if two schemas are equal.
func (schema *Schema) IsEqual(schema2 *Schema) bool {
return schema.String() == schema2.String()
}
// SchemaOperation represents a function that can be applied to a Schema.
type SchemaOperation func(schema *Schema, context string)
// Applies a specified function to a Schema and all of the Schemas that it contains.
func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) {
if schema.AdditionalItems != nil {
s := schema.AdditionalItems.Schema
if s != nil {
s.applyToSchemas(operation, "AdditionalItems")
}
}
if schema.Items != nil {
if schema.Items.SchemaArray != nil {
for _, s := range *(schema.Items.SchemaArray) {
s.applyToSchemas(operation, "Items.SchemaArray")
}
} else if schema.Items.Schema != nil {
schema.Items.Schema.applyToSchemas(operation, "Items.Schema")
}
}
if schema.AdditionalProperties != nil {
s := schema.AdditionalProperties.Schema
if s != nil {
s.applyToSchemas(operation, "AdditionalProperties")
}
}
if schema.Properties != nil {
for _, pair := range *(schema.Properties) {
s := pair.Value
s.applyToSchemas(operation, "Properties")
}
}
if schema.PatternProperties != nil {
for _, pair := range *(schema.PatternProperties) {
s := pair.Value
s.applyToSchemas(operation, "PatternProperties")
}
}
if schema.Dependencies != nil {
for _, pair := range *(schema.Dependencies) {
schemaOrStringArray := pair.Value
s := schemaOrStringArray.Schema
if s != nil {
s.applyToSchemas(operation, "Dependencies")
}
}
}
if schema.AllOf != nil {
for _, s := range *(schema.AllOf) {
s.applyToSchemas(operation, "AllOf")
}
}
if schema.AnyOf != nil {
for _, s := range *(schema.AnyOf) {
s.applyToSchemas(operation, "AnyOf")
}
}
if schema.OneOf != nil {
for _, s := range *(schema.OneOf) {
s.applyToSchemas(operation, "OneOf")
}
}
if schema.Not != nil {
schema.Not.applyToSchemas(operation, "Not")
}
if schema.Definitions != nil {
for _, pair := range *(schema.Definitions) {
s := pair.Value
s.applyToSchemas(operation, "Definitions")
}
}
operation(schema, context)
}
// CopyProperties copies all non-nil properties from the source Schema to the receiving Schema.
func (schema *Schema) CopyProperties(source *Schema) {
if source.Schema != nil {
schema.Schema = source.Schema
}
if source.ID != nil {
schema.ID = source.ID
}
if source.MultipleOf != nil {
schema.MultipleOf = source.MultipleOf
}
if source.Maximum != nil {
schema.Maximum = source.Maximum
}
if source.ExclusiveMaximum != nil {
schema.ExclusiveMaximum = source.ExclusiveMaximum
}
if source.Minimum != nil {
schema.Minimum = source.Minimum
}
if source.ExclusiveMinimum != nil {
schema.ExclusiveMinimum = source.ExclusiveMinimum
}
if source.MaxLength != nil {
schema.MaxLength = source.MaxLength
}
if source.MinLength != nil {
schema.MinLength = source.MinLength
}
if source.Pattern != nil {
schema.Pattern = source.Pattern
}
if source.AdditionalItems != nil {
schema.AdditionalItems = source.AdditionalItems
}
if source.Items != nil {
schema.Items = source.Items
}
if source.MaxItems != nil {
schema.MaxItems = source.MaxItems
}
if source.MinItems != nil {
schema.MinItems = source.MinItems
}
if source.UniqueItems != nil {
schema.UniqueItems = source.UniqueItems
}
if source.MaxProperties != nil {
schema.MaxProperties = source.MaxProperties
}
if source.MinProperties != nil {
schema.MinProperties = source.MinProperties
}
if source.Required != nil {
schema.Required = source.Required
}
if source.AdditionalProperties != nil {
schema.AdditionalProperties = source.AdditionalProperties
}
if source.Properties != nil {
schema.Properties = source.Properties
}
if source.PatternProperties != nil {
schema.PatternProperties = source.PatternProperties
}
if source.Dependencies != nil {
schema.Dependencies = source.Dependencies
}
if source.Enumeration != nil {
schema.Enumeration = source.Enumeration
}
if source.Type != nil {
schema.Type = source.Type
}
if source.AllOf != nil {
schema.AllOf = source.AllOf
}
if source.AnyOf != nil {
schema.AnyOf = source.AnyOf
}
if source.OneOf != nil {
schema.OneOf = source.OneOf
}
if source.Not != nil {
schema.Not = source.Not
}
if source.Definitions != nil {
schema.Definitions = source.Definitions
}
if source.Title != nil {
schema.Title = source.Title
}
if source.Description != nil {
schema.Description = source.Description
}
if source.Default != nil {
schema.Default = source.Default
}
if source.Format != nil {
schema.Format = source.Format
}
if source.Ref != nil {
schema.Ref = source.Ref
}
}
// TypeIs returns true if the Type of a Schema includes the specified type
func (schema *Schema) TypeIs(typeName string) bool {
if schema.Type != nil {
// the schema Type is either a string or an array of strings
if schema.Type.String != nil {
return (*(schema.Type.String) == typeName)
} else if schema.Type.StringArray != nil {
for _, n := range *(schema.Type.StringArray) {
if n == typeName {
return true
}
}
}
}
return false
}
// ResolveRefs resolves "$ref" elements in a Schema and its children.
// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf,
// the reference is kept and we expect downstream tools to separately model these
// referenced schemas.
func (schema *Schema) ResolveRefs() {
rootSchema := schema
count := 1
for count > 0 {
count = 0
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.Ref != nil {
resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref))
if err != nil {
log.Printf("%+v", err)
} else if resolvedRef.TypeIs("object") {
// don't substitute for objects, we'll model the referenced schema with a class
} else if context == "OneOf" {
// don't substitute for references inside oneOf declarations
} else if resolvedRef.OneOf != nil {
// don't substitute for references that contain oneOf declarations
} else if resolvedRef.AdditionalProperties != nil {
// don't substitute for references that look like objects
} else {
schema.Ref = nil
schema.CopyProperties(resolvedRef)
count++
}
}
}, "")
}
}
// resolveJSONPointer resolves JSON pointers.
// This current implementation is very crude and custom for OpenAPI 2.0 schemas.
// It returns an error for any pointer that it is unable to resolve.
func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) {
parts := strings.Split(ref, "#")
if len(parts) == 2 {
documentName := parts[0] + "#"
if documentName == "#" && schema.ID != nil {
documentName = *(schema.ID)
}
path := parts[1]
document := schemas[documentName]
pathParts := strings.Split(path, "/")
// we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases
if len(pathParts) == 1 {
return document, nil
} else if len(pathParts) == 3 {
switch pathParts[1] {
case "definitions":
dictionary := document.Definitions
for _, pair := range *dictionary {
if pair.Name == pathParts[2] {
result = pair.Value
}
}
case "properties":
dictionary := document.Properties
for _, pair := range *dictionary {
if pair.Name == pathParts[2] {
result = pair.Value
}
}
default:
break
}
}
}
if result == nil {
return nil, fmt.Errorf("unresolved pointer: %+v", ref)
}
return result, nil
}
// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema.
func (schema *Schema) ResolveAllOfs() {
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.AllOf != nil {
for _, allOf := range *(schema.AllOf) {
schema.CopyProperties(allOf)
}
schema.AllOf = nil
}
}, "resolveAllOfs")
}
// ResolveAnyOfs replaces all "anyOf" elements with "oneOf".
func (schema *Schema) ResolveAnyOfs() {
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.AnyOf != nil {
schema.OneOf = schema.AnyOf
schema.AnyOf = nil
}
}, "resolveAnyOfs")
}
// return a pointer to a copy of a passed-in string
func stringptr(input string) (output *string) {
return &input
}
// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition
func (schema *Schema) CopyOfficialSchemaProperty(name string) {
*schema.Properties = append(*schema.Properties,
NewNamedSchema(name,
&Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)}))
}
// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition
func (schema *Schema) CopyOfficialSchemaProperties(names []string) {
for _, name := range names {
schema.CopyOfficialSchemaProperty(name)
}
}


@@ -0,0 +1,442 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate go run generate-base.go
package jsonschema
import (
"fmt"
"io/ioutil"
"strconv"
"gopkg.in/yaml.v3"
)
// This is a global map of all known Schemas.
// It is initialized when the first Schema is created and inserted.
var schemas map[string]*Schema
// NewBaseSchema builds a schema object from an embedded json representation.
func NewBaseSchema() (schema *Schema, err error) {
b, err := baseSchemaBytes()
if err != nil {
return nil, err
}
var node yaml.Node
err = yaml.Unmarshal(b, &node)
if err != nil {
return nil, err
}
return NewSchemaFromObject(&node), nil
}
// NewSchemaFromFile reads a schema from a file.
// Currently this assumes that schemas are stored in the source distribution of this project.
func NewSchemaFromFile(filename string) (schema *Schema, err error) {
file, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var node yaml.Node
err = yaml.Unmarshal(file, &node)
if err != nil {
return nil, err
}
return NewSchemaFromObject(&node), nil
}
// NewSchemaFromObject constructs a schema from a parsed JSON object.
// Due to the complexity of the schema representation, this is a
// custom reader and not the standard Go JSON reader (encoding/json).
func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
switch jsonData.Kind {
case yaml.DocumentNode:
return NewSchemaFromObject(jsonData.Content[0])
case yaml.MappingNode:
schema := &Schema{}
for i := 0; i < len(jsonData.Content); i += 2 {
k := jsonData.Content[i].Value
v := jsonData.Content[i+1]
switch k {
case "$schema":
schema.Schema = schema.stringValue(v)
case "id":
schema.ID = schema.stringValue(v)
case "multipleOf":
schema.MultipleOf = schema.numberValue(v)
case "maximum":
schema.Maximum = schema.numberValue(v)
case "exclusiveMaximum":
schema.ExclusiveMaximum = schema.boolValue(v)
case "minimum":
schema.Minimum = schema.numberValue(v)
case "exclusiveMinimum":
schema.ExclusiveMinimum = schema.boolValue(v)
case "maxLength":
schema.MaxLength = schema.intValue(v)
case "minLength":
schema.MinLength = schema.intValue(v)
case "pattern":
schema.Pattern = schema.stringValue(v)
case "additionalItems":
schema.AdditionalItems = schema.schemaOrBooleanValue(v)
case "items":
schema.Items = schema.schemaOrSchemaArrayValue(v)
case "maxItems":
schema.MaxItems = schema.intValue(v)
case "minItems":
schema.MinItems = schema.intValue(v)
case "uniqueItems":
schema.UniqueItems = schema.boolValue(v)
case "maxProperties":
schema.MaxProperties = schema.intValue(v)
case "minProperties":
schema.MinProperties = schema.intValue(v)
case "required":
schema.Required = schema.arrayOfStringsValue(v)
case "additionalProperties":
schema.AdditionalProperties = schema.schemaOrBooleanValue(v)
case "properties":
schema.Properties = schema.mapOfSchemasValue(v)
case "patternProperties":
schema.PatternProperties = schema.mapOfSchemasValue(v)
case "dependencies":
schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v)
case "enum":
schema.Enumeration = schema.arrayOfEnumValuesValue(v)
case "type":
schema.Type = schema.stringOrStringArrayValue(v)
case "allOf":
schema.AllOf = schema.arrayOfSchemasValue(v)
case "anyOf":
schema.AnyOf = schema.arrayOfSchemasValue(v)
case "oneOf":
schema.OneOf = schema.arrayOfSchemasValue(v)
case "not":
schema.Not = NewSchemaFromObject(v)
case "definitions":
schema.Definitions = schema.mapOfSchemasValue(v)
case "title":
schema.Title = schema.stringValue(v)
case "description":
schema.Description = schema.stringValue(v)
case "default":
schema.Default = v
case "format":
schema.Format = schema.stringValue(v)
case "$ref":
schema.Ref = schema.stringValue(v)
default:
fmt.Printf("UNSUPPORTED (%s)\n", k)
}
}
// insert schema in global map
if schema.ID != nil {
if schemas == nil {
schemas = make(map[string]*Schema, 0)
}
schemas[*(schema.ID)] = schema
}
return schema
default:
fmt.Printf("schemaValue: unexpected node %+v\n", jsonData)
return nil
}
return nil
}
//
// BUILDERS
// The following methods build elements of Schemas from interface{} values.
// Each returns nil if it is unable to build the desired element.
//
// Gets the string value of an interface{} value if possible.
func (schema *Schema) stringValue(v *yaml.Node) *string {
switch v.Kind {
case yaml.ScalarNode:
return &v.Value
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
return nil
}
// Gets the numeric value of an interface{} value if possible.
func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber {
number := &SchemaNumber{}
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!float":
v2, _ := strconv.ParseFloat(v.Value, 64)
number.Float = &v2
return number
case "!!int":
v2, _ := strconv.ParseInt(v.Value, 10, 64)
number.Integer = &v2
return number
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
return nil
}
// Gets the integer value of an interface{} value if possible.
func (schema *Schema) intValue(v *yaml.Node) *int64 {
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!float":
v2, _ := strconv.ParseFloat(v.Value, 64)
v3 := int64(v2)
return &v3
case "!!int":
v2, _ := strconv.ParseInt(v.Value, 10, 64)
return &v2
default:
fmt.Printf("intValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("intValue: unexpected node %+v\n", v)
}
return nil
}
// Gets the bool value of an interface{} value if possible.
func (schema *Schema) boolValue(v *yaml.Node) *bool {
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!bool":
v2, _ := strconv.ParseBool(v.Value)
return &v2
default:
fmt.Printf("boolValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("boolValue: unexpected node %+v\n", v)
}
return nil
}
// Gets a map of Schemas from an interface{} value if possible.
func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema {
switch v.Kind {
case yaml.MappingNode:
m := make([]*NamedSchema, 0)
for i := 0; i < len(v.Content); i += 2 {
k2 := v.Content[i].Value
v2 := v.Content[i+1]
pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)}
m = append(m, pair)
}
return &m
default:
fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v)
}
return nil
}
// Gets an array of Schemas from an interface{} value if possible.
func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema {
switch v.Kind {
case yaml.SequenceNode:
m := make([]*Schema, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.MappingNode:
s := NewSchemaFromObject(v2)
m = append(m, s)
default:
fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2)
}
}
return &m
case yaml.MappingNode:
m := make([]*Schema, 0)
s := NewSchemaFromObject(v)
m = append(m, s)
return &m
default:
fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v)
}
return nil
}
// Gets a Schema or an array of Schemas from an interface{} value if possible.
func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray {
switch v.Kind {
case yaml.SequenceNode:
m := make([]*Schema, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.MappingNode:
s := NewSchemaFromObject(v2)
m = append(m, s)
default:
fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2)
}
}
return &SchemaOrSchemaArray{SchemaArray: &m}
case yaml.MappingNode:
s := NewSchemaFromObject(v)
return &SchemaOrSchemaArray{Schema: s}
default:
fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v)
}
return nil
}
// Gets an array of strings from an interface{} value if possible.
func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string {
switch v.Kind {
case yaml.ScalarNode:
a := []string{v.Value}
return &a
case yaml.SequenceNode:
a := make([]string, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
a = append(a, v2.Value)
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
}
}
return &a
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
}
return nil
}
// Gets a string or an array of strings from an interface{} value if possible.
func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray {
switch v.Kind {
case yaml.ScalarNode:
s := &StringOrStringArray{}
s.String = &v.Value
return s
case yaml.SequenceNode:
a := make([]string, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
a = append(a, v2.Value)
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
}
}
s := &StringOrStringArray{}
s.StringArray = &a
return s
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
}
return nil
}
// Gets an array of enum values from an interface{} value if possible.
func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue {
a := make([]SchemaEnumValue, 0)
switch v.Kind {
case yaml.SequenceNode:
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
switch v2.Tag {
case "!!str":
a = append(a, SchemaEnumValue{String: &v2.Value})
case "!!bool":
v3, _ := strconv.ParseBool(v2.Value)
a = append(a, SchemaEnumValue{Bool: &v3})
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag)
}
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2)
}
}
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v)
}
return &a
}
// Gets a map of schemas or string arrays from an interface{} value if possible.
func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray {
m := make([]*NamedSchemaOrStringArray, 0)
switch v.Kind {
case yaml.MappingNode:
for i := 0; i < len(v.Content); i += 2 {
k2 := v.Content[i].Value
v2 := v.Content[i+1]
switch v2.Kind {
case yaml.SequenceNode:
a := make([]string, 0)
for _, v3 := range v2.Content {
switch v3.Kind {
case yaml.ScalarNode:
a = append(a, v3.Value)
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3)
}
}
s := &SchemaOrStringArray{}
s.StringArray = &a
pair := &NamedSchemaOrStringArray{Name: k2, Value: s}
m = append(m, pair)
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2)
}
}
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v)
}
return &m
}
// Gets a schema or a boolean value from an interface{} value if possible.
func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean {
schemaOrBoolean := &SchemaOrBoolean{}
switch v.Kind {
case yaml.ScalarNode:
v2, _ := strconv.ParseBool(v.Value)
schemaOrBoolean.Boolean = &v2
case yaml.MappingNode:
schemaOrBoolean.Schema = NewSchemaFromObject(v)
default:
fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v)
}
return schemaOrBoolean
}


@@ -0,0 +1,150 @@
{
"id": "http://json-schema.org/draft-04/schema#",
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Core schema meta-schema",
"definitions": {
"schemaArray": {
"type": "array",
"minItems": 1,
"items": { "$ref": "#" }
},
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
},
"simpleTypes": {
"enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
},
"stringArray": {
"type": "array",
"items": { "type": "string" },
"minItems": 1,
"uniqueItems": true
}
},
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uri"
},
"$schema": {
"type": "string",
"format": "uri"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"default": {},
"multipleOf": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": true
},
"maximum": {
"type": "number"
},
"exclusiveMaximum": {
"type": "boolean",
"default": false
},
"minimum": {
"type": "number"
},
"exclusiveMinimum": {
"type": "boolean",
"default": false
},
"maxLength": { "$ref": "#/definitions/positiveInteger" },
"minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
"pattern": {
"type": "string",
"format": "regex"
},
"additionalItems": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"items": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/schemaArray" }
],
"default": {}
},
"maxItems": { "$ref": "#/definitions/positiveInteger" },
"minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
"uniqueItems": {
"type": "boolean",
"default": false
},
"maxProperties": { "$ref": "#/definitions/positiveInteger" },
"minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
"required": { "$ref": "#/definitions/stringArray" },
"additionalProperties": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"definitions": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"properties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"patternProperties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"dependencies": {
"type": "object",
"additionalProperties": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/stringArray" }
]
}
},
"enum": {
"type": "array",
"minItems": 1,
"uniqueItems": true
},
"type": {
"anyOf": [
{ "$ref": "#/definitions/simpleTypes" },
{
"type": "array",
"items": { "$ref": "#/definitions/simpleTypes" },
"minItems": 1,
"uniqueItems": true
}
]
},
"allOf": { "$ref": "#/definitions/schemaArray" },
"anyOf": { "$ref": "#/definitions/schemaArray" },
"oneOf": { "$ref": "#/definitions/schemaArray" },
"not": { "$ref": "#" }
},
"dependencies": {
"exclusiveMaximum": [ "maximum" ],
"exclusiveMinimum": [ "minimum" ]
},
"default": {}
}


@@ -0,0 +1,369 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"gopkg.in/yaml.v3"
)
const indentation = " "
func renderMappingNode(node *yaml.Node, indent string) (result string) {
result = "{\n"
innerIndent := indent + indentation
for i := 0; i < len(node.Content); i += 2 {
// first print the key
key := node.Content[i].Value
result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key)
// then the value
value := node.Content[i+1]
switch value.Kind {
case yaml.ScalarNode:
result += "\"" + value.Value + "\""
case yaml.MappingNode:
result += renderMappingNode(value, innerIndent)
case yaml.SequenceNode:
result += renderSequenceNode(value, innerIndent)
default:
result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value)
}
if i < len(node.Content)-2 {
result += ","
}
result += "\n"
}
result += indent + "}"
return result
}
func renderSequenceNode(node *yaml.Node, indent string) (result string) {
result = "[\n"
innerIndent := indent + indentation
for i := 0; i < len(node.Content); i++ {
item := node.Content[i]
switch item.Kind {
case yaml.ScalarNode:
result += innerIndent + "\"" + item.Value + "\""
case yaml.MappingNode:
result += innerIndent + renderMappingNode(item, innerIndent) + ""
default:
result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item)
}
if i < len(node.Content)-1 {
result += ","
}
result += "\n"
}
result += indent + "]"
return result
}
func renderStringArray(array []string, indent string) (result string) {
result = "[\n"
innerIndent := indent + indentation
for i, item := range array {
result += innerIndent + "\"" + item + "\""
if i < len(array)-1 {
result += ","
}
result += "\n"
}
result += indent + "]"
return result
}
// Render renders a yaml.Node as JSON
func Render(node *yaml.Node) string {
if node.Kind == yaml.DocumentNode {
if len(node.Content) == 1 {
return Render(node.Content[0])
}
} else if node.Kind == yaml.MappingNode {
return renderMappingNode(node, "") + "\n"
} else if node.Kind == yaml.SequenceNode {
return renderSequenceNode(node, "") + "\n"
}
return ""
}
func (object *SchemaNumber) nodeValue() *yaml.Node {
if object.Integer != nil {
return nodeForInt64(*object.Integer)
} else if object.Float != nil {
return nodeForFloat64(*object.Float)
} else {
return nil
}
}
func (object *SchemaOrBoolean) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.Boolean != nil {
return nodeForBoolean(*object.Boolean)
} else {
return nil
}
}
func nodeForStringArray(array []string) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range array {
content = append(content, nodeForString(item))
}
return nodeForSequence(content)
}
func nodeForSchemaArray(array []*Schema) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range array {
content = append(content, item.nodeValue())
}
return nodeForSequence(content)
}
func (object *StringOrStringArray) nodeValue() *yaml.Node {
if object.String != nil {
return nodeForString(*object.String)
} else if object.StringArray != nil {
return nodeForStringArray(*(object.StringArray))
} else {
return nil
}
}
func (object *SchemaOrStringArray) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.StringArray != nil {
return nodeForStringArray(*(object.StringArray))
} else {
return nil
}
}
func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.SchemaArray != nil {
return nodeForSchemaArray(*(object.SchemaArray))
} else {
return nil
}
}
func (object *SchemaEnumValue) nodeValue() *yaml.Node {
if object.String != nil {
return nodeForString(*object.String)
} else if object.Bool != nil {
return nodeForBoolean(*object.Bool)
} else {
return nil
}
}
func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, pair := range *(array) {
content = appendPair(content, pair.Name, pair.Value.nodeValue())
}
return nodeForMapping(content)
}
func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, pair := range *(array) {
content = appendPair(content, pair.Name, pair.Value.nodeValue())
}
return nodeForMapping(content)
}
func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range *array {
content = append(content, item.nodeValue())
}
return nodeForSequence(content)
}
func nodeForMapping(content []*yaml.Node) *yaml.Node {
return &yaml.Node{
Kind: yaml.MappingNode,
Content: content,
}
}
func nodeForSequence(content []*yaml.Node) *yaml.Node {
return &yaml.Node{
Kind: yaml.SequenceNode,
Content: content,
}
}
func nodeForString(value string) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!str",
Value: value,
}
}
func nodeForBoolean(value bool) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!bool",
Value: fmt.Sprintf("%t", value),
}
}
func nodeForInt64(value int64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!int",
Value: fmt.Sprintf("%d", value),
}
}
func nodeForFloat64(value float64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!float",
Value: fmt.Sprintf("%f", value),
}
}
func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node {
nodes = append(nodes, nodeForString(name))
nodes = append(nodes, value)
return nodes
}
func (schema *Schema) nodeValue() *yaml.Node {
n := &yaml.Node{Kind: yaml.MappingNode}
content := make([]*yaml.Node, 0)
if schema.Title != nil {
content = appendPair(content, "title", nodeForString(*schema.Title))
}
if schema.ID != nil {
content = appendPair(content, "id", nodeForString(*schema.ID))
}
if schema.Schema != nil {
content = appendPair(content, "$schema", nodeForString(*schema.Schema))
}
if schema.Type != nil {
content = appendPair(content, "type", schema.Type.nodeValue())
}
if schema.Items != nil {
content = appendPair(content, "items", schema.Items.nodeValue())
}
if schema.Description != nil {
content = appendPair(content, "description", nodeForString(*schema.Description))
}
if schema.Required != nil {
content = appendPair(content, "required", nodeForStringArray(*schema.Required))
}
if schema.AdditionalProperties != nil {
content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue())
}
if schema.PatternProperties != nil {
content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties))
}
if schema.Properties != nil {
content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties))
}
if schema.Dependencies != nil {
content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies))
}
if schema.Ref != nil {
content = appendPair(content, "$ref", nodeForString(*schema.Ref))
}
if schema.MultipleOf != nil {
content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue())
}
if schema.Maximum != nil {
content = appendPair(content, "maximum", schema.Maximum.nodeValue())
}
if schema.ExclusiveMaximum != nil {
content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum))
}
if schema.Minimum != nil {
content = appendPair(content, "minimum", schema.Minimum.nodeValue())
}
if schema.ExclusiveMinimum != nil {
content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum))
}
if schema.MaxLength != nil {
content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength))
}
if schema.MinLength != nil {
content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength))
}
if schema.Pattern != nil {
content = appendPair(content, "pattern", nodeForString(*schema.Pattern))
}
if schema.AdditionalItems != nil {
content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue())
}
if schema.MaxItems != nil {
content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems))
}
if schema.MinItems != nil {
content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems))
}
if schema.UniqueItems != nil {
content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems))
}
if schema.MaxProperties != nil {
content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties))
}
if schema.MinProperties != nil {
content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties))
}
if schema.Enumeration != nil {
content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration))
}
if schema.AllOf != nil {
content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf))
}
if schema.AnyOf != nil {
content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf))
}
if schema.OneOf != nil {
content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf))
}
if schema.Not != nil {
content = appendPair(content, "not", schema.Not.nodeValue())
}
if schema.Definitions != nil {
content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions))
}
if schema.Default != nil {
// m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default})
}
if schema.Format != nil {
content = appendPair(content, "format", nodeForString(*schema.Format))
}
n.Content = content
return n
}
// JSONString returns a json representation of a schema.
func (schema *Schema) JSONString() string {
node := schema.nodeValue()
return Render(node)
}
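
A minimal sketch of how the writer added above can be exercised: decode a small YAML document into a yaml.Node with gopkg.in/yaml.v3 and hand it to Render. The gnostic import path and the sample document are assumptions for illustration only.

package main

import (
	"fmt"

	jsonschema "github.com/googleapis/gnostic/jsonschema"
	"gopkg.in/yaml.v3"
)

func main() {
	// Decode a small YAML document into a yaml.Node tree (Kind == DocumentNode).
	var node yaml.Node
	if err := yaml.Unmarshal([]byte("title: demo\nrequired:\n  - name\n"), &node); err != nil {
		panic(err)
	}
	// Render unwraps the document node and prints the mapping as indented JSON.
	fmt.Print(jsonschema.Render(&node))
	// Expected output (two-space indentation, every scalar quoted):
	// {
	//   "title": "demo",
	//   "required": [
	//     "name"
	//   ]
	// }
}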

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2020 Google LLC. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -41,6 +41,9 @@ option java_package = "org.openapi_v2";
// the future. 'GPB' is reserved for the protocol buffer implementation itself. // the future. 'GPB' is reserved for the protocol buffer implementation itself.
option objc_class_prefix = "OAS"; option objc_class_prefix = "OAS";
// The Go package name.
option go_package = "openapiv2;openapi_v2";
message AdditionalPropertiesItem { message AdditionalPropertiesItem {
oneof oneof { oneof oneof {
Schema schema = 1; Schema schema = 1;
@@ -553,7 +556,7 @@ message Response {
repeated NamedAny vendor_extension = 5; repeated NamedAny vendor_extension = 5;
} }
// One or more JSON representations for parameters // One or more JSON representations for responses
message ResponseDefinitions { message ResponseDefinitions {
repeated NamedResponse additional_properties = 1; repeated NamedResponse additional_properties = 1;
} }

View File

@@ -1,16 +1,14 @@
# OpenAPI v2 Protocol Buffer Models # OpenAPI v2 Protocol Buffer Models
This directory contains a Protocol Buffer-language model This directory contains a Protocol Buffer-language model and related code for
and related code for supporting OpenAPI v2. supporting OpenAPI v2.
Gnostic applications and plugins can use OpenAPIv2.proto Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol
to generate Protocol Buffer support code for their preferred languages. Buffer support code for their preferred languages.
OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
descriptions into the Protocol Buffer-based datastructures the Protocol Buffer-based datastructures generated from OpenAPIv2.proto.
generated from OpenAPIv2.proto.
OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler
compiler generator, and OpenAPIv2.pb.go is generated by generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer
protoc, the Protocol Buffer compiler, and protoc-gen-go, the compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.
Protocol Buffer Go code generation plugin.

View File

@@ -0,0 +1,41 @@
// Copyright 2020 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openapi_v2
import (
"github.com/googleapis/gnostic/compiler"
"gopkg.in/yaml.v3"
)
// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
func ParseDocument(b []byte) (*Document, error) {
info, err := compiler.ReadInfoFromBytes("", b)
if err != nil {
return nil, err
}
root := info.Content[0]
return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
}
// YAMLValue produces a serialized YAML representation of the document.
func (d *Document) YAMLValue(comment string) ([]byte, error) {
rawInfo := d.ToRawInfo()
rawInfo = &yaml.Node{
Kind: yaml.DocumentNode,
Content: []*yaml.Node{rawInfo},
HeadComment: comment,
}
return yaml.Marshal(rawInfo)
}
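
The two helpers above give a read/serialize round trip. A minimal sketch, assuming the github.com/googleapis/gnostic/openapiv2 import path and an illustrative swagger.yaml on disk:

package main

import (
	"fmt"
	"io/ioutil"

	openapi_v2 "github.com/googleapis/gnostic/openapiv2"
)

func main() {
	// Read an OpenAPI v2 (Swagger) description; the file name is illustrative.
	data, err := ioutil.ReadFile("swagger.yaml")
	if err != nil {
		panic(err)
	}
	// ParseDocument accepts YAML or JSON bytes and builds the protobuf-based Document.
	doc, err := openapi_v2.ParseDocument(data)
	if err != nil {
		panic(err)
	}
	// YAMLValue serializes the document back to YAML with a head comment.
	out, err := doc.YAMLValue("regenerated from swagger.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}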

View File

@@ -203,7 +203,7 @@
"additionalProperties": { "additionalProperties": {
"$ref": "#/definitions/response" "$ref": "#/definitions/response"
}, },
"description": "One or more JSON representations for parameters" "description": "One or more JSON representations for responses"
}, },
"externalDocs": { "externalDocs": {
"type": "object", "type": "object",

View File

@@ -1 +1,3 @@
module github.com/hashicorp/golang-lru module github.com/hashicorp/golang-lru
go 1.12

View File

@@ -85,18 +85,52 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
return false, evicted return false, evicted
} }
// Remove removes the provided key from the cache. // PeekOrAdd checks if a key is in the cache without updating the
func (c *Cache) Remove(key interface{}) { // recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) {
c.lock.Lock() c.lock.Lock()
c.lru.Remove(key) defer c.lock.Unlock()
previous, ok = c.lru.Peek(key)
if ok {
return previous, true, false
}
evicted = c.lru.Add(key, value)
return nil, false, evicted
}
// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) (present bool) {
c.lock.Lock()
present = c.lru.Remove(key)
c.lock.Unlock() c.lock.Unlock()
return
}
// Resize changes the cache size.
func (c *Cache) Resize(size int) (evicted int) {
c.lock.Lock()
evicted = c.lru.Resize(size)
c.lock.Unlock()
return evicted
} }
// RemoveOldest removes the oldest item from the cache. // RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() { func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) {
c.lock.Lock() c.lock.Lock()
c.lru.RemoveOldest() key, value, ok = c.lru.RemoveOldest()
c.lock.Unlock() c.lock.Unlock()
return
}
// GetOldest returns the oldest entry
func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) {
c.lock.Lock()
key, value, ok = c.lru.GetOldest()
c.lock.Unlock()
return
} }
// Keys returns a slice of the keys in the cache, from oldest to newest. // Keys returns a slice of the keys in the cache, from oldest to newest.
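
Taken together, the new Cache helpers are easiest to see in a short sketch (assuming the github.com/hashicorp/golang-lru import path; keys and values are illustrative):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	c, err := lru.New(2) // room for two entries
	if err != nil {
		panic(err)
	}
	c.Add("a", 1)
	c.Add("b", 2)

	// PeekOrAdd finds "a" without refreshing its recency, so nothing is added.
	prev, found, evicted := c.PeekOrAdd("a", 99)
	fmt.Println(prev, found, evicted) // 1 true false

	// GetOldest reports the least recently used entry without evicting it.
	k, v, ok := c.GetOldest()
	fmt.Println(k, v, ok) // a 1 true

	// Resize shrinks the cache and returns how many entries were evicted ("a" here).
	fmt.Println(c.Resize(1)) // 1

	// Remove now reports whether the key was actually present.
	fmt.Println(c.Remove("b")) // true
}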

View File

@@ -73,6 +73,9 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) {
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok { if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent) c.evictList.MoveToFront(ent)
if ent.Value.(*entry) == nil {
return nil, false
}
return ent.Value.(*entry).value, true return ent.Value.(*entry).value, true
} }
return return
@@ -142,6 +145,19 @@ func (c *LRU) Len() int {
return c.evictList.Len() return c.evictList.Len()
} }
// Resize changes the cache size.
func (c *LRU) Resize(size int) (evicted int) {
diff := c.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.removeOldest()
}
c.size = size
return diff
}
// removeOldest removes the oldest item from the cache. // removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() { func (c *LRU) removeOldest() {
ent := c.evictList.Back() ent := c.evictList.Back()

View File

@@ -10,7 +10,7 @@ type LRUCache interface {
// updates the "recently used"-ness of the key. #value, isFound // updates the "recently used"-ness of the key. #value, isFound
Get(key interface{}) (value interface{}, ok bool) Get(key interface{}) (value interface{}, ok bool)
// Check if a key exsists in cache without updating the recent-ness. // Checks if a key exists in cache without updating the recent-ness.
Contains(key interface{}) (ok bool) Contains(key interface{}) (ok bool)
// Returns key's value without updating the "recently used"-ness of the key. // Returns key's value without updating the "recently used"-ness of the key.
@@ -31,6 +31,9 @@ type LRUCache interface {
// Returns the number of items in the cache. // Returns the number of items in the cache.
Len() int Len() int
// Clear all cache entries // Clears all cache entries.
Purge() Purge()
// Resizes cache, returning number evicted
Resize(int) int
} }

View File

@@ -1,44 +1,54 @@
# Mergo # Mergo
[![GoDoc][3]][4]
[![GitHub release][5]][6]
[![GoCard][7]][8]
[![Build Status][1]][2]
[![Coverage Status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]
[![GoCenter Kudos][15]][16]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://img.shields.io/github/release/imdario/mergo.svg
[6]: https://github.com/imdario/mergo/releases
[7]: https://goreportcard.com/badge/imdario/mergo
[8]: https://goreportcard.com/report/github.com/imdario/mergo
[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[10]: https://coveralls.io/github/imdario/mergo?branch=master
[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
[16]: https://search.gocenter.io/github.com/imdario/mergo
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
## Status ## Status
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
[![GoDoc][3]][4]
[![GoCard][5]][6]
[![Build Status][1]][2]
[![Coverage Status][7]][8]
[![Sourcegraph][9]][10]
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://goreportcard.com/badge/imdario/mergo
[6]: https://goreportcard.com/report/github.com/imdario/mergo
[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[8]: https://coveralls.io/github/imdario/mergo?branch=master
[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
### Latest release
[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).
### Important note ### Important note
Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
### Donations ### Donations
If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> <a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) [![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
@@ -87,8 +97,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search) - [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [janoszen/containerssh](https://github.com/janoszen/containerssh)
## Installation ## Install
go get github.com/imdario/mergo go get github.com/imdario/mergo
@@ -99,7 +110,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
## Usage ## Usage
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
```go ```go
if err := mergo.Merge(&dst, src); err != nil { if err := mergo.Merge(&dst, src); err != nil {
@@ -125,9 +136,7 @@ if err := mergo.Map(&dst, srcMap); err != nil {
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). Here is a nice example:
### Nice example
```go ```go
package main package main
@@ -175,10 +184,10 @@ import (
"time" "time"
) )
type timeTransfomer struct { type timeTransformer struct {
} }
func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
if typ == reflect.TypeOf(time.Time{}) { if typ == reflect.TypeOf(time.Time{}) {
return func(dst, src reflect.Value) error { return func(dst, src reflect.Value) error {
if dst.CanSet() { if dst.CanSet() {
@@ -202,7 +211,7 @@ type Snapshot struct {
func main() { func main() {
src := Snapshot{time.Now()} src := Snapshot{time.Now()}
dest := Snapshot{} dest := Snapshot{}
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
fmt.Println(dest) fmt.Println(dest)
// Will print // Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }

View File

@@ -4,41 +4,140 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
/* /*
Package mergo merges same-type structs and maps by setting default values in zero-value fields. A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
Status
It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
Important note
Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules.
Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
Install
Do your usual installation procedure:
go get github.com/imdario/mergo
// use in your .go code
import (
"github.com/imdario/mergo"
)
Usage Usage
From my own work-in-progress project: You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
type networkConfig struct { if err := mergo.Merge(&dst, src); err != nil {
Protocol string // ...
Address string
ServerType string `json: "server_type"`
Port uint16
} }
type FssnConfig struct { Also, you can merge overwriting values using the transformer WithOverride.
Network networkConfig
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
// ...
} }
var fssnDefault = FssnConfig { Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
networkConfig {
"tcp", if err := mergo.Map(&dst, srcMap); err != nil {
"127.0.0.1", // ...
"http",
31560,
},
} }
// Inside a function [...] Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
if err := mergo.Merge(&config, fssnDefault); err != nil { Here is a nice example:
log.Fatal(err)
package main
import (
"fmt"
"github.com/imdario/mergo"
)
type Foo struct {
A string
B int64
} }
// More code [...] func main() {
src := Foo{
A: "one",
B: 2,
}
dest := Foo{
A: "two",
}
mergo.Merge(&dest, src)
fmt.Println(dest)
// Will print
// {two 2}
}
Transformers
Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
package main
import (
"fmt"
"github.com/imdario/mergo"
"reflect"
"time"
)
type timeTransformer struct {
}
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
if typ == reflect.TypeOf(time.Time{}) {
return func(dst, src reflect.Value) error {
if dst.CanSet() {
isZero := dst.MethodByName("IsZero")
result := isZero.Call([]reflect.Value{})
if result[0].Bool() {
dst.Set(src)
}
}
return nil
}
}
return nil
}
type Snapshot struct {
Time time.Time
// ...
}
func main() {
src := Snapshot{time.Now()}
dest := Snapshot{}
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
fmt.Println(dest)
// Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
About
Written by Dario Castañé: https://da.rio.hn
License
BSD 3-Clause license, as Go language.
*/ */
package mergo package mergo

5
vendor/github.com/imdario/mergo/go.mod generated vendored Normal file
View File

@@ -0,0 +1,5 @@
module github.com/imdario/mergo
go 1.13
require gopkg.in/yaml.v2 v2.3.0

4
vendor/github.com/imdario/mergo/go.sum generated vendored Normal file
View File

@@ -0,0 +1,4 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -99,11 +99,11 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
continue continue
} }
if srcKind == dstKind { if srcKind == dstKind {
if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return return
} }
} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return return
} }
} else if srcKind == reflect.Map { } else if srcKind == reflect.Map {
@@ -141,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
} }
func _map(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerAgument
}
var ( var (
vDst, vSrc reflect.Value vDst, vSrc reflect.Value
err error err error
@@ -157,8 +160,7 @@ func _map(dst, src interface{}, opts ...func(*Config)) error {
// To be friction-less, we redirect equal-type arguments // To be friction-less, we redirect equal-type arguments
// to deepMerge. Only because arguments can be anything. // to deepMerge. Only because arguments can be anything.
if vSrc.Kind() == vDst.Kind() { if vSrc.Kind() == vDst.Kind() {
_, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
return err
} }
switch vSrc.Kind() { switch vSrc.Kind() {
case reflect.Struct: case reflect.Struct:

View File

@@ -11,26 +11,26 @@ package mergo
import ( import (
"fmt" "fmt"
"reflect" "reflect"
"unsafe"
) )
func hasExportedField(dst reflect.Value) (exported bool) { func hasMergeableFields(dst reflect.Value) (exported bool) {
for i, n := 0, dst.NumField(); i < n; i++ { for i, n := 0, dst.NumField(); i < n; i++ {
field := dst.Type().Field(i) field := dst.Type().Field(i)
if isExportedComponent(&field) { if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
return true exported = exported || hasMergeableFields(dst.Field(i))
} else if isExportedComponent(&field) {
exported = exported || len(field.PkgPath) == 0
} }
} }
return return
} }
func isExportedComponent(field *reflect.StructField) bool { func isExportedComponent(field *reflect.StructField) bool {
name := field.Name
pkgPath := field.PkgPath pkgPath := field.PkgPath
if len(pkgPath) > 0 { if len(pkgPath) > 0 {
return false return false
} }
c := name[0] c := field.Name[0]
if 'a' <= c && c <= 'z' || c == '_' { if 'a' <= c && c <= 'z' || c == '_' {
return false return false
} }
@@ -44,6 +44,8 @@ type Config struct {
Transformers Transformers Transformers Transformers
overwriteWithEmptyValue bool overwriteWithEmptyValue bool
overwriteSliceWithEmptyValue bool overwriteSliceWithEmptyValue bool
sliceDeepCopy bool
debug bool
} }
type Transformers interface { type Transformers interface {
@@ -53,17 +55,16 @@ type Transformers interface {
// Traverses recursively both values, assigning src's fields values to dst. // Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows // The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types. // short circuiting on recursive types.
func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) { func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
dst = dstIn
overwrite := config.Overwrite overwrite := config.Overwrite
typeCheck := config.TypeCheck typeCheck := config.TypeCheck
overwriteWithEmptySrc := config.overwriteWithEmptyValue overwriteWithEmptySrc := config.overwriteWithEmptyValue
overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
sliceDeepCopy := config.sliceDeepCopy
if !src.IsValid() { if !src.IsValid() {
return return
} }
if dst.CanAddr() { if dst.CanAddr() {
addr := dst.UnsafeAddr() addr := dst.UnsafeAddr()
h := 17 * addr h := 17 * addr
@@ -71,7 +72,7 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
typ := dst.Type() typ := dst.Type()
for p := seen; p != nil; p = p.next { for p := seen; p != nil; p = p.next {
if p.ptr == addr && p.typ == typ { if p.ptr == addr && p.typ == typ {
return dst, nil return nil
} }
} }
// Remember, remember... // Remember, remember...
@@ -85,126 +86,154 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
} }
} }
if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() {
err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind())
return
}
switch dst.Kind() { switch dst.Kind() {
case reflect.Struct: case reflect.Struct:
if hasExportedField(dst) { if hasMergeableFields(dst) {
dstCp := reflect.New(dst.Type()).Elem()
for i, n := 0, dst.NumField(); i < n; i++ { for i, n := 0, dst.NumField(); i < n; i++ {
dstField := dst.Field(i) if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
structField := dst.Type().Field(i)
// copy un-exported struct fields
if !isExportedComponent(&structField) {
rf := dstCp.Field(i)
rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec
dstRF := dst.Field(i)
if !dst.Field(i).CanAddr() {
continue
}
dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec
rf.Set(dstRF)
continue
}
dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config)
if err != nil {
return return
} }
dstCp.Field(i).Set(dstField)
} }
if dst.CanSet() {
dst.Set(dstCp)
} else {
dst = dstCp
}
return
} else { } else {
if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
dst = src dst.Set(src)
} }
} }
case reflect.Map: case reflect.Map:
if dst.IsNil() && !src.IsNil() { if dst.IsNil() && !src.IsNil() {
if dst.CanSet() {
dst.Set(reflect.MakeMap(dst.Type())) dst.Set(reflect.MakeMap(dst.Type()))
} else { }
dst = src
if src.Kind() != reflect.Map {
if overwrite {
dst.Set(src)
}
return return
} }
}
for _, key := range src.MapKeys() { for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key) srcElement := src.MapIndex(key)
dstElement := dst.MapIndex(key)
if !srcElement.IsValid() { if !srcElement.IsValid() {
continue continue
} }
if dst.MapIndex(key).IsValid() { dstElement := dst.MapIndex(key)
k := dstElement.Interface() switch srcElement.Kind() {
dstElement = reflect.ValueOf(k) case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
} if srcElement.IsNil() {
if isReflectNil(srcElement) { if overwrite {
if overwrite || isReflectNil(dstElement) {
dst.SetMapIndex(key, srcElement) dst.SetMapIndex(key, srcElement)
} }
continue continue
} }
fallthrough
default:
if !srcElement.CanInterface() { if !srcElement.CanInterface() {
continue continue
} }
switch reflect.TypeOf(srcElement.Interface()).Kind() {
case reflect.Struct:
fallthrough
case reflect.Ptr:
fallthrough
case reflect.Map:
srcMapElm := srcElement
dstMapElm := dstElement
if srcMapElm.CanInterface() {
srcMapElm = reflect.ValueOf(srcMapElm.Interface())
if dstMapElm.IsValid() {
dstMapElm = reflect.ValueOf(dstMapElm.Interface())
}
}
if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
return
}
case reflect.Slice:
srcSlice := reflect.ValueOf(srcElement.Interface())
var dstSlice reflect.Value
if !dstElement.IsValid() || dstElement.IsNil() {
dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
} else {
dstSlice = reflect.ValueOf(dstElement.Interface())
}
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
if typeCheck && srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = srcSlice
} else if config.AppendSlice {
if srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
} else if sliceDeepCopy {
i := 0
for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
srcElement := srcSlice.Index(i)
dstElement := dstSlice.Index(i)
if srcElement.CanInterface() { if srcElement.CanInterface() {
srcElement = reflect.ValueOf(srcElement.Interface()) srcElement = reflect.ValueOf(srcElement.Interface())
if dstElement.IsValid() { }
if dstElement.CanInterface() {
dstElement = reflect.ValueOf(dstElement.Interface()) dstElement = reflect.ValueOf(dstElement.Interface())
} }
}
dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config) if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
if err != nil {
return return
} }
dst.SetMapIndex(key, dstElement) }
} }
dst.SetMapIndex(key, dstSlice)
}
}
if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
continue
}
if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
dst.SetMapIndex(key, srcElement)
}
}
case reflect.Slice: case reflect.Slice:
newSlice := dst if !dst.CanSet() {
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
if typeCheck && src.Type() != dst.Type() {
return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type())
}
newSlice = src
} else if config.AppendSlice {
if typeCheck && src.Type() != dst.Type() {
err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
return
}
newSlice = reflect.AppendSlice(dst, src)
}
if dst.CanSet() {
dst.Set(newSlice)
} else {
dst = newSlice
}
case reflect.Ptr, reflect.Interface:
if isReflectNil(src) {
break break
} }
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) {
if dst.IsNil() || overwrite {
if overwrite || isEmptyValue(dst) {
if dst.CanSet() {
dst.Set(src) dst.Set(src)
} else { } else if config.AppendSlice {
dst = src if src.Type() != dst.Type() {
return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
}
dst.Set(reflect.AppendSlice(dst, src))
} else if sliceDeepCopy {
for i := 0; i < src.Len() && i < dst.Len(); i++ {
srcElement := src.Index(i)
dstElement := dst.Index(i)
if srcElement.CanInterface() {
srcElement = reflect.ValueOf(srcElement.Interface())
}
if dstElement.CanInterface() {
dstElement = reflect.ValueOf(dstElement.Interface())
}
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
} }
} }
} }
case reflect.Ptr:
fallthrough
case reflect.Interface:
if isReflectNil(src) {
if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
dst.Set(src)
}
break break
} }
@@ -214,33 +243,35 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
dst.Set(src) dst.Set(src)
} }
} else if src.Kind() == reflect.Ptr { } else if src.Kind() == reflect.Ptr {
if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return return
} }
dst = dst.Addr()
} else if dst.Elem().Type() == src.Type() { } else if dst.Elem().Type() == src.Type() {
if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
return return
} }
} else { } else {
return dst, ErrDifferentArgumentsTypes return ErrDifferentArgumentsTypes
} }
break break
} }
if dst.IsNil() || overwrite { if dst.IsNil() || overwrite {
if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) { if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
if dst.CanSet() {
dst.Set(src) dst.Set(src)
} else {
dst = src
} }
break
} }
} else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
if dst.Elem().Kind() == src.Elem().Kind() {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return return
} }
break
}
default: default:
overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
if overwriteFull { if mustSet {
if dst.CanSet() { if dst.CanSet() {
dst.Set(src) dst.Set(src)
} else { } else {
@@ -281,6 +312,7 @@ func WithOverride(config *Config) {
// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. // WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
func WithOverwriteWithEmptyValue(config *Config) { func WithOverwriteWithEmptyValue(config *Config) {
config.Overwrite = true
config.overwriteWithEmptyValue = true config.overwriteWithEmptyValue = true
} }
@@ -299,7 +331,16 @@ func WithTypeCheck(config *Config) {
config.TypeCheck = true config.TypeCheck = true
} }
// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
func WithSliceDeepCopy(config *Config) {
config.sliceDeepCopy = true
config.Overwrite = true
}
func merge(dst, src interface{}, opts ...func(*Config)) error { func merge(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerAgument
}
var ( var (
vDst, vSrc reflect.Value vDst, vSrc reflect.Value
err error err error
@@ -314,14 +355,10 @@ func merge(dst, src interface{}, opts ...func(*Config)) error {
if vDst, vSrc, err = resolveValues(dst, src); err != nil { if vDst, vSrc, err = resolveValues(dst, src); err != nil {
return err return err
} }
if !vDst.CanSet() {
return fmt.Errorf("cannot set dst, needs reference")
}
if vDst.Type() != vSrc.Type() { if vDst.Type() != vSrc.Type() {
return ErrDifferentArgumentsTypes return ErrDifferentArgumentsTypes
} }
_, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
return err
} }
// IsReflectNil is the reflect value provided nil // IsReflectNil is the reflect value provided nil
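
Two caller-visible effects of this rewrite are worth a sketch: dst must now be a pointer (merge and _map return the ErrNonPointerAgument sentinel, spelled exactly as the library exports it, before doing any work), and the pointer form still fills only zero-value fields by default. The Config type below is illustrative.

package main

import (
	"errors"
	"fmt"

	"github.com/imdario/mergo"
)

// Config is an illustrative struct; any pair of same-type structs behaves the same way.
type Config struct {
	Host string
	Port int
}

func main() {
	dst := Config{Host: "localhost"}
	src := Config{Host: "example.com", Port: 8080}

	// Passing dst by value is now rejected up front with a dedicated sentinel error.
	err := mergo.Merge(dst, src)
	fmt.Println(errors.Is(err, mergo.ErrNonPointerAgument)) // true

	// Passing a pointer works; only zero-value fields in dst are filled from src.
	if err := mergo.Merge(&dst, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {Host:localhost Port:8080}
}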

View File

@@ -20,6 +20,7 @@ var (
ErrNotSupported = errors.New("only structs and maps are supported") ErrNotSupported = errors.New("only structs and maps are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
) )
// During deepMerge, must keep track of checks that are // During deepMerge, must keep track of checks that are
@@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
} }
return return
} }
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
if dst.CanAddr() {
addr := dst.UnsafeAddr()
h := 17 * addr
seen := visited[h]
typ := dst.Type()
for p := seen; p != nil; p = p.next {
if p.ptr == addr && p.typ == typ {
return nil
}
}
// Remember, remember...
visited[h] = &visit{addr, typ, seen}
}
return // TODO refactor
}

View File

@@ -22,12 +22,12 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"net" "net"
"sync" "sync"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"k8s.io/klog"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc" "google.golang.org/grpc"
@@ -219,17 +219,24 @@ func logGRPC(method string, request, reply interface{}, err error) {
Method string Method string
Request interface{} Request interface{}
Response interface{} Response interface{}
// Error as string, for backward compatibility.
// "" on no error.
Error string Error string
// Full error dump, to be able to parse out full gRPC error code and message separately in a test.
FullError error
}{ }{
Method: method, Method: method,
Request: request, Request: request,
Response: reply, Response: reply,
FullError: err,
} }
if err != nil { if err != nil {
logMessage.Error = err.Error() logMessage.Error = err.Error()
} }
msg, _ := json.Marshal(logMessage) msg, _ := json.Marshal(logMessage)
fmt.Printf("gRPCCall: %s\n", msg) klog.V(3).Infof("gRPCCall: %s\n", msg)
} }
func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) {

View File

@@ -234,6 +234,18 @@ func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *csi.ListV
return ret0, ret1 return ret0, ret1
} }
func (m *MockControllerServer) ControllerGetVolume(arg0 context.Context, arg1 *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
ret := m.ctrl.Call(m, "ControllerGetVolume", arg0, arg1)
ret0, _ := ret[0].(*csi.ControllerGetVolumeResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ControllerGetVolume indicates an expected call of ControllerGetVolume
func (mr *MockControllerServerMockRecorder) ControllerGetVolume(arg0, arg1 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetVolume), arg0, arg1)
}
// ListVolumes indicates an expected call of ListVolumes // ListVolumes indicates an expected call of ListVolumes
func (mr *MockControllerServerMockRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call { func (mr *MockControllerServerMockRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVolumes", reflect.TypeOf((*MockControllerServer)(nil).ListVolumes), arg0, arg1) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVolumes", reflect.TypeOf((*MockControllerServer)(nil).ListVolumes), arg0, arg1)
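
A sketch of how the newly generated ControllerGetVolume mock is typically wired up in a test. The gomock-style constructor name (driver.NewMockControllerServer) and the response fields are assumptions based on the standard generated API and the CSI spec, not something this diff shows directly.

package driver_test

import (
	"context"
	"testing"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/mock/gomock"
	"github.com/kubernetes-csi/csi-test/v4/driver"
)

func TestControllerGetVolume(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	srv := driver.NewMockControllerServer(ctrl)
	srv.EXPECT().
		ControllerGetVolume(gomock.Any(), gomock.Any()).
		Return(&csi.ControllerGetVolumeResponse{
			Volume: &csi.Volume{VolumeId: "vol-1"},
		}, nil)

	// Exercise the expectation directly; in a full test the call arrives over gRPC.
	resp, err := srv.ControllerGetVolume(context.Background(), &csi.ControllerGetVolumeRequest{VolumeId: "vol-1"})
	if err != nil || resp.GetVolume().GetVolumeId() != "vol-1" {
		t.Fatalf("unexpected result: %+v, %v", resp, err)
	}
}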

View File

@@ -19,7 +19,7 @@ package driver
import ( import (
"net" "net"
"github.com/kubernetes-csi/csi-test/utils" "github.com/kubernetes-csi/csi-test/v4/utils"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@@ -75,7 +75,7 @@ func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) {
} }
// Create a client connection // Create a client connection
m.conn, err = utils.Connect(m.Address()) m.conn, err = utils.Connect(m.Address(), grpc.WithInsecure())
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -28,10 +28,7 @@ import (
) )
// Connect address by grpc // Connect address by grpc
func Connect(address string) (*grpc.ClientConn, error) { func Connect(address string, dialOptions ...grpc.DialOption) (*grpc.ClientConn, error) {
dialOptions := []grpc.DialOption{
grpc.WithInsecure(),
}
u, err := url.Parse(address) u, err := url.Parse(address)
if err == nil && (!u.IsAbs() || u.Scheme == "unix") { if err == nil && (!u.IsAbs() || u.Scheme == "unix") {
dialOptions = append(dialOptions, dialOptions = append(dialOptions,
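
Connect no longer injects grpc.WithInsecure() itself; callers opt in explicitly, as the MockCSIDriver change above already does. A minimal sketch, with an illustrative socket path:

package main

import (
	"fmt"

	"github.com/kubernetes-csi/csi-test/v4/utils"
	"google.golang.org/grpc"
)

func main() {
	// The caller now supplies transport credentials; passing grpc.WithInsecure()
	// restores the old default behaviour for a local UNIX-socket endpoint.
	conn, err := utils.Connect("unix:///tmp/csi.sock", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("connected to", conn.Target())
}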

View File

@@ -163,7 +163,7 @@ func (c *counter) updateExemplar(v float64, l Labels) {
// (e.g. number of HTTP requests, partitioned by response code and // (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec. // method). Create instances with NewCounterVec.
type CounterVec struct { type CounterVec struct {
*metricVec *MetricVec
} }
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@@ -176,11 +176,11 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &CounterVec{ return &CounterVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) { if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
} }
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection. result.init(result) // Init self-collection.
return result return result
}), }),
@@ -188,7 +188,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
} }
// GetMetricWithLabelValues returns the Counter for the given slice of label // GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of // values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created. // label values is accessed for the first time, a new Counter is created.
// //
// It is possible to call this method without using the returned Counter to only // It is possible to call this method without using the returned Counter to only
@@ -202,7 +202,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// Counter with the same label values is created later. // Counter with the same label values is created later.
// //
// An error is returned if the number of label values is not the same as the // An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels). // number of variable labels in Desc (minus any curried labels).
// //
// Note that for more than one label value, this method is prone to mistakes // Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -211,7 +211,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example. // See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Counter), err return metric.(Counter), err
} }
@@ -219,19 +219,19 @@ func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
} }
// GetMetricWith returns the Counter for the given Labels map (the label names // GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is // must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of // accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are // creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues. // the same as for GetMetricWithLabelValues.
// //
// An error is returned if the number and names of the Labels are inconsistent // An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels). // with those of the variable labels in Desc (minus any curried labels).
// //
// This method is used for the same purpose as // This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two // GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods. // methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := v.metricVec.getMetricWith(labels) metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Counter), err return metric.(Counter), err
} }
@@ -275,7 +275,7 @@ func (v *CounterVec) With(labels Labels) Counter {
// registered with a given registry (usually the uncurried version). The Reset // registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector. // method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
vec, err := v.curryWith(labels) vec, err := v.MetricVec.CurryWith(labels)
if vec != nil { if vec != nil {
return &CounterVec{vec}, err return &CounterVec{vec}, err
} }
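
CounterVec now embeds the exported MetricVec (and the related helpers MakeLabelPairs/NewMetricVec are public), but everyday use through the constructor is unchanged. A minimal sketch with an illustrative metric name:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total",
			Help: "Requests handled, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(requests)

	// Label values are matched positionally against the label names above.
	requests.WithLabelValues("200", "GET").Inc()

	// Currying is still available and now goes through the exported MetricVec.CurryWith.
	gets, err := requests.CurryWith(prometheus.Labels{"method": "GET"})
	if err != nil {
		panic(err)
	}
	gets.WithLabelValues("404").Inc()
	fmt.Println("counters registered and incremented")
}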

View File

@@ -51,7 +51,7 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on // constLabelPairs contains precalculated DTO label pairs based on
// the constant labels. // the constant labels.
constLabelPairs []*dto.LabelPair constLabelPairs []*dto.LabelPair
// VariableLabels contains names of labels for which the metric // variableLabels contains names of labels for which the metric
// maintains variable values. // maintains variable values.
variableLabels []string variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This // id is a hash of the values of the ConstLabels and fqName. This

View File

@@ -132,7 +132,7 @@ func (g *gauge) Write(out *dto.Metric) error {
// (e.g. number of operations queued, partitioned by user and operation // (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec. // type). Create instances with NewGaugeVec.
type GaugeVec struct { type GaugeVec struct {
*metricVec *MetricVec
} }
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@@ -145,11 +145,11 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &GaugeVec{ return &GaugeVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) { if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
} }
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection. result.init(result) // Init self-collection.
return result return result
}), }),
@@ -157,7 +157,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
} }
// GetMetricWithLabelValues returns the Gauge for the given slice of label // GetMetricWithLabelValues returns the Gauge for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of // values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Gauge is created. // label values is accessed for the first time, a new Gauge is created.
// //
// It is possible to call this method without using the returned Gauge to only // It is possible to call this method without using the returned Gauge to only
@@ -172,7 +172,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// example. // example.
// //
// An error is returned if the number of label values is not the same as the // An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels). // number of variable labels in Desc (minus any curried labels).
// //
// Note that for more than one label value, this method is prone to mistakes // Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -180,7 +180,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes // latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Gauge), err return metric.(Gauge), err
} }
@@ -188,19 +188,19 @@ func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
} }
// GetMetricWith returns the Gauge for the given Labels map (the label names // GetMetricWith returns the Gauge for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is // must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Gauge is created. Implications of // accessed for the first time, a new Gauge is created. Implications of
// creating a Gauge without using it and keeping the Gauge for later use are // creating a Gauge without using it and keeping the Gauge for later use are
// the same as for GetMetricWithLabelValues. // the same as for GetMetricWithLabelValues.
// //
// An error is returned if the number and names of the Labels are inconsistent // An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels). // with those of the variable labels in Desc (minus any curried labels).
// //
// This method is used for the same purpose as // This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two // GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods. // methods.
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := v.metricVec.getMetricWith(labels) metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Gauge), err return metric.(Gauge), err
} }
@@ -244,7 +244,7 @@ func (v *GaugeVec) With(labels Labels) Gauge {
// registered with a given registry (usually the uncurried version). The Reset // registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector. // method deletes all metrics, even if called on a curried vector.
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
vec, err := v.curryWith(labels) vec, err := v.MetricVec.CurryWith(labels)
if vec != nil { if vec != nil {
return &GaugeVec{vec}, err return &GaugeVec{vec}, err
} }
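As with CounterVec, the GaugeVec accessors now go through the exported MetricVec. A short usage sketch contrasting the two lookup styles described in the comments above (metric and label names are invented for illustration):

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical queue-length gauge partitioned by user and operation type.
	queued := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "demo_ops_queued", Help: "Queued operations."},
		[]string{"user", "op"},
	)
	prometheus.MustRegister(queued)

	// Label-value form: compact, but the argument order must match the label order.
	g, err := queued.GetMetricWithLabelValues("alice", "write")
	if err != nil {
		log.Fatal(err)
	}
	g.Set(3)

	// Labels-map form: more verbose but order-independent, at the cost of
	// building and processing the Labels map.
	g, err = queued.GetMetricWith(prometheus.Labels{"op": "read", "user": "bob"})
	if err != nil {
		log.Fatal(err)
	}
	g.Inc()
}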

View File

@@ -58,9 +58,10 @@ type goCollector struct {
// collector will use the memstats from a previous collection if // collector will use the memstats from a previous collection if
// runtime.ReadMemStats takes more than 1s. However, if there are no previously // runtime.ReadMemStats takes more than 1s. However, if there are no previously
// collected memstats, or their collection is more than 5m ago, the collection // collected memstats, or their collection is more than 5m ago, the collection
// will block until runtime.ReadMemStats succeeds. (The problem might be solved // will block until runtime.ReadMemStats succeeds.
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go //
// issue.) // NOTE: The problem is solved in Go 1.15, see
// https://github.com/golang/go/issues/19812 for the related Go issue.
func NewGoCollector() Collector { func NewGoCollector() Collector {
return &goCollector{ return &goCollector{
goroutinesDesc: NewDesc( goroutinesDesc: NewDesc(

View File

@@ -192,7 +192,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h := &histogram{ h := &histogram{
desc: desc, desc: desc,
upperBounds: opts.Buckets, upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{{}, {}}, counts: [2]*histogramCounts{{}, {}},
now: time.Now, now: time.Now,
} }
@@ -409,7 +409,7 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
// (e.g. HTTP request latencies, partitioned by status code and method). Create // (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec. // instances with NewHistogramVec.
type HistogramVec struct { type HistogramVec struct {
*metricVec *MetricVec
} }
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@@ -422,14 +422,14 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &HistogramVec{ return &HistogramVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...) return newHistogram(desc, opts, lvs...)
}), }),
} }
} }
// GetMetricWithLabelValues returns the Histogram for the given slice of label // GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of // values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created. // label values is accessed for the first time, a new Histogram is created.
// //
// It is possible to call this method without using the returned Histogram to only // It is possible to call this method without using the returned Histogram to only
@@ -444,7 +444,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// example. // example.
// //
// An error is returned if the number of label values is not the same as the // An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels). // number of variable labels in Desc (minus any curried labels).
// //
// Note that for more than one label value, this method is prone to mistakes // Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -453,7 +453,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example. // See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Observer), err return metric.(Observer), err
} }
@@ -461,19 +461,19 @@ func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error)
} }
// GetMetricWith returns the Histogram for the given Labels map (the label names // GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is // must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of // accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use // creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues. // are the same as for GetMetricWithLabelValues.
// //
// An error is returned if the number and names of the Labels are inconsistent // An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels). // with those of the variable labels in Desc (minus any curried labels).
// //
// This method is used for the same purpose as // This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two // GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods. // methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels) metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Observer), err return metric.(Observer), err
} }
@@ -517,7 +517,7 @@ func (v *HistogramVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset // registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector. // method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels) vec, err := v.MetricVec.CurryWith(labels)
if vec != nil { if vec != nil {
return &HistogramVec{vec}, err return &HistogramVec{vec}, err
} }
@@ -602,7 +602,7 @@ func NewConstHistogram(
count: count, count: count,
sum: sum, sum: sum,
buckets: buckets, buckets: buckets,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: MakeLabelPairs(desc, labelValues),
}, nil }, nil
} }

View File

@@ -89,7 +89,7 @@ type Opts struct {
// better covered by target labels set by the scraping Prometheus // better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a // server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also // machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels ConstLabels Labels
} }

View File

@@ -110,7 +110,7 @@ type SummaryOpts struct {
// better covered by target labels set by the scraping Prometheus // better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a // server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also // machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective // Objectives defines the quantile rank estimates with their respective
@@ -208,7 +208,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
// Use the lock-free implementation of a Summary without objectives. // Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{ s := &noObjectivesSummary{
desc: desc, desc: desc,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*summaryCounts{{}, {}}, counts: [2]*summaryCounts{{}, {}},
} }
s.init(s) // Init self-collection. s.init(s) // Init self-collection.
@@ -221,7 +221,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
objectives: opts.Objectives, objectives: opts.Objectives,
sortedObjectives: make([]float64, 0, len(opts.Objectives)), sortedObjectives: make([]float64, 0, len(opts.Objectives)),
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: MakeLabelPairs(desc, labelValues),
hotBuf: make([]float64, 0, opts.BufCap), hotBuf: make([]float64, 0, opts.BufCap),
coldBuf: make([]float64, 0, opts.BufCap), coldBuf: make([]float64, 0, opts.BufCap),
@@ -513,7 +513,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create // (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec. // instances with NewSummaryVec.
type SummaryVec struct { type SummaryVec struct {
*metricVec *MetricVec
} }
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@@ -535,14 +535,14 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &SummaryVec{ return &SummaryVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...) return newSummary(desc, opts, lvs...)
}), }),
} }
} }
// GetMetricWithLabelValues returns the Summary for the given slice of label // GetMetricWithLabelValues returns the Summary for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of // values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Summary is created. // label values is accessed for the first time, a new Summary is created.
// //
// It is possible to call this method without using the returned Summary to only // It is possible to call this method without using the returned Summary to only
@@ -557,7 +557,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// example. // example.
// //
// An error is returned if the number of label values is not the same as the // An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels). // number of variable labels in Desc (minus any curried labels).
// //
// Note that for more than one label value, this method is prone to mistakes // Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -566,7 +566,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example. // See also the GaugeVec example.
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Observer), err return metric.(Observer), err
} }
@@ -574,19 +574,19 @@ func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
} }
// GetMetricWith returns the Summary for the given Labels map (the label names // GetMetricWith returns the Summary for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is // must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Summary is created. Implications of // accessed for the first time, a new Summary is created. Implications of
// creating a Summary without using it and keeping the Summary for later use are // creating a Summary without using it and keeping the Summary for later use are
// the same as for GetMetricWithLabelValues. // the same as for GetMetricWithLabelValues.
// //
// An error is returned if the number and names of the Labels are inconsistent // An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels). // with those of the variable labels in Desc (minus any curried labels).
// //
// This method is used for the same purpose as // This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two // GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods. // methods.
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels) metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Observer), err return metric.(Observer), err
} }
@@ -630,7 +630,7 @@ func (v *SummaryVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset // registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector. // method deletes all metrics, even if called on a curried vector.
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels) vec, err := v.MetricVec.CurryWith(labels)
if vec != nil { if vec != nil {
return &SummaryVec{vec}, err return &SummaryVec{vec}, err
} }
@@ -716,7 +716,7 @@ func NewConstSummary(
count: count, count: count,
sum: sum, sum: sum,
quantiles: quantiles, quantiles: quantiles,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: MakeLabelPairs(desc, labelValues),
}, nil }, nil
} }

View File

@@ -63,7 +63,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
desc: desc, desc: desc,
valType: valueType, valType: valueType,
function: function, function: function,
labelPairs: makeLabelPairs(desc, nil), labelPairs: MakeLabelPairs(desc, nil),
} }
result.init(result) result.init(result)
return result return result
@@ -95,7 +95,7 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
desc: desc, desc: desc,
valType: valueType, valType: valueType,
val: value, val: value,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: MakeLabelPairs(desc, labelValues),
}, nil }, nil
} }
@@ -145,7 +145,14 @@ func populateMetric(
return nil return nil
} }
func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { // MakeLabelPairs is a helper function to create protobuf LabelPairs from the
// variable and constant labels in the provided Desc. The values for the
// variable labels are defined by the labelValues slice, which must be in the
// same order as the corresponding variable labels in the Desc.
//
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
if totalLen == 0 { if totalLen == 0 {
// Super fast path. // Super fast path.
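MakeLabelPairs only matters for hand-written Metric implementations, as the new doc comment says. A minimal, hypothetical sketch of such a custom Metric that reuses it (the type, names, and gauge semantics are made up; dto is the client_model protobuf package):

package main

import (
	"github.com/golang/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// externalTemp reports a value obtained elsewhere and uses MakeLabelPairs
// to build its label pairs once, up front.
type externalTemp struct {
	desc       *prometheus.Desc
	value      float64
	labelPairs []*dto.LabelPair
}

func (m *externalTemp) Desc() *prometheus.Desc { return m.desc }

func (m *externalTemp) Write(out *dto.Metric) error {
	out.Label = m.labelPairs
	out.Gauge = &dto.Gauge{Value: proto.Float64(m.value)}
	return nil
}

func main() {
	desc := prometheus.NewDesc("demo_temperature_celsius", "Room temperature.", []string{"room"}, nil)
	m := &externalTemp{
		desc:       desc,
		value:      21.5,
		labelPairs: prometheus.MakeLabelPairs(desc, []string{"kitchen"}),
	}
	_ = m // would be emitted from a custom Collector's Collect method
}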

View File

@@ -20,12 +20,20 @@ import (
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
) )
// metricVec is a Collector to bundle metrics of the same name that differ in // MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. metricVec is not used directly (and therefore // their label values. MetricVec is not used directly but as a building block
// unexported). It is used as a building block for implementations of vectors of // for implementations of vectors of a given metric type, like GaugeVec,
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. // CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
// It also handles label currying. // used for custom Metric implementations.
type metricVec struct { //
// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
// FooVec and initialize it with NewMetricVec. Implement wrappers for
// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
// add the convenience methods WithLabelValues, With, and MustCurryWith, which
// panic instead of returning errors. See also the MetricVec example.
type MetricVec struct {
*metricMap *metricMap
curry []curriedLabelValue curry []curriedLabelValue
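Since the doc comment above points at a "MetricVec example", here is a hedged sketch of the embedding pattern it describes: a hypothetical InfoVec that embeds *prometheus.MetricVec, builds children with NewMetricVec, and wraps the generic accessor with a typed, panicking convenience method (all names below are invented):

package main

import "github.com/prometheus/client_golang/prometheus"

// InfoVec is a custom vector type following the pattern described above.
type InfoVec struct {
	*prometheus.MetricVec
}

func NewInfoVec(name, help string, labelNames []string) *InfoVec {
	desc := prometheus.NewDesc(name, help, labelNames, nil)
	return &InfoVec{
		MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
			// For the sketch, back every child with a constant gauge of 1.
			m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, 1, lvs...)
			if err != nil {
				panic(err)
			}
			return m
		}),
	}
}

// WithLabelValues wraps the generic accessor and panics on error, mirroring
// the convenience methods of the built-in vector types.
func (v *InfoVec) WithLabelValues(lvs ...string) prometheus.Metric {
	m, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return m
}

func main() {
	build := NewInfoVec("demo_build_info", "Build information.", []string{"version"})
	prometheus.MustRegister(build)
	_ = build.WithLabelValues("v1.2.3")
}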
@@ -35,9 +43,9 @@ type metricVec struct {
hashAddByte func(h uint64, b byte) uint64 hashAddByte func(h uint64, b byte) uint64
} }
// newMetricVec returns an initialized metricVec. // NewMetricVec returns an initialized metricVec.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
return &metricVec{ return &MetricVec{
metricMap: &metricMap{ metricMap: &metricMap{
metrics: map[uint64][]metricWithLabelValues{}, metrics: map[uint64][]metricWithLabelValues{},
desc: desc, desc: desc,
@@ -63,7 +71,7 @@ func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes // latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example. // See also the CounterVec example.
func (m *metricVec) DeleteLabelValues(lvs ...string) bool { func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
h, err := m.hashLabelValues(lvs) h, err := m.hashLabelValues(lvs)
if err != nil { if err != nil {
return false return false
@@ -82,7 +90,7 @@ func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
// //
// This method is used for the same purpose as DeleteLabelValues(...string). See // This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods. // there for pros and cons of the two methods.
func (m *metricVec) Delete(labels Labels) bool { func (m *MetricVec) Delete(labels Labels) bool {
h, err := m.hashLabels(labels) h, err := m.hashLabels(labels)
if err != nil { if err != nil {
return false return false
@@ -95,15 +103,32 @@ func (m *metricVec) Delete(labels Labels) bool {
// show up in GoDoc. // show up in GoDoc.
// Describe implements Collector. // Describe implements Collector.
func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
// Collect implements Collector. // Collect implements Collector.
func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
// Reset deletes all metrics in this vector. // Reset deletes all metrics in this vector.
func (m *metricVec) Reset() { m.metricMap.Reset() } func (m *MetricVec) Reset() { m.metricMap.Reset() }
func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { // CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the MetricVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
//
// Note that CurryWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
var ( var (
newCurry []curriedLabelValue newCurry []curriedLabelValue
oldCurry = m.curry oldCurry = m.curry
@@ -128,7 +153,7 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
return nil, fmt.Errorf("%d unknown label(s) found during currying", l) return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
} }
return &metricVec{ return &MetricVec{
metricMap: m.metricMap, metricMap: m.metricMap,
curry: newCurry, curry: newCurry,
hashAdd: m.hashAdd, hashAdd: m.hashAdd,
@@ -136,7 +161,34 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
}, nil }, nil
} }
func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { // GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created (by
// calling the newMetric function provided during construction of the
// MetricVec).
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it in its initial state.

//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the
// Metric will still exist, but it will not be exported anymore, even if a
// Metric with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
//
// Note that GetMetricWithLabelValues is usually not called directly but through
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs) h, err := m.hashLabelValues(lvs)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -145,7 +197,23 @@ func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
} }
func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { // GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of
// creating a Metric without using it and keeping the Metric for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
//
// Note that GetMetricWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels) h, err := m.hashLabels(labels)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -154,7 +222,7 @@ func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
} }
func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err return 0, err
} }
@@ -177,7 +245,7 @@ func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
return h, nil return h, nil
} }
func (m *metricVec) hashLabels(labels Labels) (uint64, error) { func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err return 0, err
} }
@@ -276,7 +344,9 @@ func (m *metricMap) deleteByHashWithLabelValues(
} }
if len(metrics) > 1 { if len(metrics) > 1 {
old := metrics
m.metrics[h] = append(metrics[:i], metrics[i+1:]...) m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
old[len(old)-1] = metricWithLabelValues{}
} else { } else {
delete(m.metrics, h) delete(m.metrics, h)
} }
@@ -302,7 +372,9 @@ func (m *metricMap) deleteByHashWithLabels(
} }
if len(metrics) > 1 { if len(metrics) > 1 {
old := metrics
m.metrics[h] = append(metrics[:i], metrics[i+1:]...) m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
old[len(old)-1] = metricWithLabelValues{}
} else { } else {
delete(m.metrics, h) delete(m.metrics, h)
} }
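The two deleteByHash hunks add one line each: after append removes the element, the now-stale last slot of the old backing array is overwritten with a zero value so it no longer keeps the deleted metric reachable. A generic sketch of the same slice-deletion idiom (hypothetical element type):

package main

import "fmt"

type item struct{ payload *[1 << 20]byte }

// removeAt deletes s[i] in place while keeping order, then zeroes the stale
// trailing slot so the backing array does not pin the removed element.
func removeAt(s []item, i int) []item {
	old := s
	s = append(s[:i], s[i+1:]...)
	old[len(old)-1] = item{} // drop the lingering reference
	return s
}

func main() {
	s := []item{{}, {}, {}}
	s = removeAt(s, 1)
	fmt.Println(len(s)) // 2
}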

View File

@@ -32,7 +32,9 @@ import (
// in a no-op Registerer. // in a no-op Registerer.
// //
// WrapRegistererWith provides a way to add fixed labels to a subset of // WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics exposed. // Collectors. It should not be used to add fixed labels to all metrics
// exposed. See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
// //
// Conflicts between Collectors registered through the original Registerer with // Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be // Collectors registered through the wrapping Registerer will still be
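The updated wrap.go comment links to the same target-labels guidance as Opts and SummaryOpts. A small sketch of the intended WrapRegistererWith usage, labelling only a subset of collectors (registry layout and label name are assumptions for illustration):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// Only collectors registered through the wrapper get the fixed label.
	sub := prometheus.WrapRegistererWith(prometheus.Labels{"subsystem": "cache"}, reg)

	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_cache_hits_total", Help: "Cache hits.",
	})
	sub.MustRegister(hits)
	hits.Inc()

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	// http.ListenAndServe(":8080", nil) // left commented so the sketch exits immediately
}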

View File

@@ -164,7 +164,7 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error {
} }
// ExtractSamples builds a slice of samples from the provided metric // ExtractSamples builds a slice of samples from the provided metric
// families. If an error occurrs during sample extraction, it continues to // families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last // extract from the remaining metric families. The returned error is the last
// error that has occurred. // error that has occurred.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {

View File

@@ -299,6 +299,17 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
return nil return nil
} }
// Check for duplicate label names.
labels := make(map[string]struct{})
for _, l := range p.currentMetric.Label {
lName := l.GetName()
if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{}
} else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
return nil
}
}
return p.startLabelValue return p.startLabelValue
} }
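The new block rejects a label name that appears twice on one sample line. A quick way to see the behaviour through the public parser API (metric and label names are made up):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	var p expfmt.TextParser

	// "job" appears twice, which the parser now reports as a parse error.
	in := "demo_metric{job=\"a\",job=\"b\"} 1\n"
	_, err := p.TextToMetricFamilies(strings.NewReader(in))
	fmt.Println(err) // expected: a "duplicate label names" error
}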

View File

@@ -20,7 +20,7 @@ const (
prime64 = 1099511628211 prime64 = 1099511628211
) )
// hashNew initializies a new fnv64a hash value. // hashNew initializes a new fnv64a hash value.
func hashNew() uint64 { func hashNew() uint64 {
return offset64 return offset64
} }

View File

@@ -181,77 +181,77 @@ func (d *Duration) Type() string {
return "duration" return "duration"
} }
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
// ParseDuration parses a string into a time.Duration, assuming that a year // ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h. // always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) { func ParseDuration(durationStr string) (Duration, error) {
switch durationStr {
case "0":
// Allow 0 without a unit. // Allow 0 without a unit.
if durationStr == "0" {
return 0, nil return 0, nil
case "":
return 0, fmt.Errorf("empty duration string")
} }
matches := durationRE.FindStringSubmatch(durationStr) matches := durationRE.FindStringSubmatch(durationStr)
if len(matches) != 3 { if matches == nil {
return 0, fmt.Errorf("not a valid duration string: %q", durationStr) return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
} }
var ( var dur time.Duration
n, _ = strconv.Atoi(matches[1])
dur = time.Duration(n) * time.Millisecond // Parse the match at pos `pos` in the regex and use `mult` to turn that
) // into ms, then add that value to the total parsed duration.
switch unit := matches[2]; unit { m := func(pos int, mult time.Duration) {
case "y": if matches[pos] == "" {
dur *= 1000 * 60 * 60 * 24 * 365 return
case "w":
dur *= 1000 * 60 * 60 * 24 * 7
case "d":
dur *= 1000 * 60 * 60 * 24
case "h":
dur *= 1000 * 60 * 60
case "m":
dur *= 1000 * 60
case "s":
dur *= 1000
case "ms":
// Value already correct
default:
return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
} }
n, _ := strconv.Atoi(matches[pos])
d := time.Duration(n) * time.Millisecond
dur += d * mult
}
m(2, 1000*60*60*24*365) // y
m(4, 1000*60*60*24*7) // w
m(6, 1000*60*60*24) // d
m(8, 1000*60*60) // h
m(10, 1000*60) // m
m(12, 1000) // s
m(14, 1) // ms
return Duration(dur), nil return Duration(dur), nil
} }
func (d Duration) String() string { func (d Duration) String() string {
var ( var (
ms = int64(time.Duration(d) / time.Millisecond) ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms" r = ""
) )
if ms == 0 { if ms == 0 {
return "0s" return "0s"
} }
factors := map[string]int64{
"y": 1000 * 60 * 60 * 24 * 365, f := func(unit string, mult int64, exact bool) {
"w": 1000 * 60 * 60 * 24 * 7, if exact && ms%mult != 0 {
"d": 1000 * 60 * 60 * 24, return
"h": 1000 * 60 * 60, }
"m": 1000 * 60, if v := ms / mult; v > 0 {
"s": 1000, r += fmt.Sprintf("%d%s", v, unit)
"ms": 1, ms -= v * mult
}
} }
switch int64(0) { // Only format years and weeks if the remainder is zero, as it is often
case ms % factors["y"]: // easier to read 90d than 12w6d.
unit = "y" f("y", 1000*60*60*24*365, true)
case ms % factors["w"]: f("w", 1000*60*60*24*7, true)
unit = "w"
case ms % factors["d"]: f("d", 1000*60*60*24, false)
unit = "d" f("h", 1000*60*60, false)
case ms % factors["h"]: f("m", 1000*60, false)
unit = "h" f("s", 1000, false)
case ms % factors["m"]: f("ms", 1, false)
unit = "m"
case ms % factors["s"]: return r
unit = "s"
}
return fmt.Sprintf("%v%v", ms/factors[unit], unit)
} }
// MarshalYAML implements the yaml.Marshaler interface. // MarshalYAML implements the yaml.Marshaler interface.
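The relaxed regexp means ParseDuration now accepts compound strings such as "1h30m", and the rewritten String renders compound output when no single unit divides the value exactly. A small sketch against the public API (expected outputs follow from the code above):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Compound units are now accepted.
	d, err := model.ParseDuration("1h30m")
	fmt.Println(d, err) // 1h30m <nil>

	// Single-unit strings keep working as before.
	d, _ = model.ParseDuration("90d")
	fmt.Println(d) // 90d

	// String emits compound output when no single unit fits exactly.
	d, _ = model.ParseDuration("36h")
	fmt.Println(d) // 1d12h
}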

Some files were not shown because too many files have changed in this diff.