Update dependency go modules in client for k8s v1.26.0-rc.0
@@ -3,9 +3,9 @@ module github.com/kubernetes-csi/external-snapshotter/client/v6
go 1.18

require (
    k8s.io/api v0.25.2
    k8s.io/apimachinery v0.25.2
    k8s.io/client-go v0.25.2
    k8s.io/api v0.26.0-rc.0
    k8s.io/apimachinery v0.26.0-rc.0
    k8s.io/client-go v0.26.0-rc.0
    k8s.io/code-generator v0.25.2
)

@@ -20,7 +20,7 @@ require (
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/google/gnostic v0.6.9 // indirect
    github.com/google/go-cmp v0.5.8 // indirect
    github.com/google/go-cmp v0.5.9 // indirect
    github.com/google/gofuzz v1.1.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
@@ -30,14 +30,14 @@
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
    golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
    golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
    golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
    golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
    golang.org/x/text v0.3.7 // indirect
    golang.org/x/mod v0.6.0 // indirect
    golang.org/x/net v0.1.1-0.20221027164007-c63010009c80 // indirect
    golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
    golang.org/x/sys v0.1.0 // indirect
    golang.org/x/term v0.1.0 // indirect
    golang.org/x/text v0.4.0 // indirect
    golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
    golang.org/x/tools v0.1.12 // indirect
    golang.org/x/tools v0.2.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/protobuf v1.28.1 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
@@ -45,8 +45,8 @@
    gopkg.in/yaml.v3 v3.0.1 // indirect
    k8s.io/gengo v0.0.0-20220913193501-391367153a38 // indirect
    k8s.io/klog/v2 v2.80.1 // indirect
    k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea // indirect
    k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
    k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
    k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
    sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
    sigs.k8s.io/yaml v1.3.0 // indirect

@@ -64,7 +64,6 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
@@ -119,8 +118,8 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -167,8 +166,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -177,7 +176,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
@@ -234,8 +232,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -265,15 +263,16 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.1.1-0.20221027164007-c63010009c80 h1:CtRWmqbiPSOXwJV1JoY7pWiTx2xzVKQ813bvU+Y/9jI=
golang.org/x/net v0.1.1-0.20221027164007-c63010009c80/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -313,11 +312,14 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc=
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -325,8 +327,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -375,8 +378,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -488,24 +491,23 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8=
k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0=
k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs=
k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA=
k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo=
k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4=
k8s.io/api v0.26.0-rc.0 h1:2/bCSv8uREflBfGU8dUMf1o0hwTT+dssZeNWS0l3qOk=
k8s.io/api v0.26.0-rc.0/go.mod h1:zpIWCMJDnA2CZjZFc2XUHBuDe6zuLURCjyVy9cPR6pM=
k8s.io/apimachinery v0.26.0-rc.0 h1:PModgniuCua1/ukXLN1zfaF6ypTKL/M2QA1geNRr9eI=
k8s.io/apimachinery v0.26.0-rc.0/go.mod h1:VXMmlsE7YRJ5vyAyWpkKIfFkEbDNpVs0ObpkuQf1WfM=
k8s.io/client-go v0.26.0-rc.0 h1:nxRrL8CVr6/PE170LCMcj4Va1A51lu/cpSOfmQgi+8c=
k8s.io/client-go v0.26.0-rc.0/go.mod h1:4a6z7WzSFRLk1ckfC4R0Km7XgKuSkmByrZ4DTRyf6sg=
k8s.io/code-generator v0.25.2 h1:qEHux0+E1c+j1MhsWn9+4Z6av8zrZBixOTPW064rSiY=
k8s.io/code-generator v0.25.2/go.mod h1:f61OcU2VqVQcjt/6TrU0sta1TA5hHkOO6ZZPwkL9Eys=
k8s.io/gengo v0.0.0-20220913193501-391367153a38 h1:yGN2TZt9XIl5wrcYaFtVMqzP2GIzX5gIcOObCZCuDeA=
k8s.io/gengo v0.0.0-20220913193501-391367153a38/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea h1:3QOH5+2fGsY8e1qf+GIFpg+zw/JGNrgyZRQR7/m6uWg=
k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs=
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
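The go.mod and go.sum changes above and the generated vendored files that follow are consistent with a routine dependency bump plus re-vendoring. A typical command sequence that would produce them (an assumption for illustration; the exact commands used are not recorded in this commit) is:

    cd client
    go get k8s.io/api@v0.26.0-rc.0 k8s.io/apimachinery@v0.26.0-rc.0 k8s.io/client-go@v0.26.0-rc.0
    go mod tidy
    go mod vendor

Running go mod vendor copies the module's dependencies, including github.com/davecgh/go-spew, into client/vendor/, which is why the new vendored files below are part of the same commit.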
15 client/vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file
@@ -0,0 +1,15 @@
ISC License

Copyright (c) 2012-2016 Dave Collins <dave@davec.name>

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
145 client/vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file
@@ -0,0 +1,145 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4

package spew

import (
    "reflect"
    "unsafe"
)

const (
    // UnsafeDisabled is a build-time constant which specifies whether or
    // not access to the unsafe package is available.
    UnsafeDisabled = false

    // ptrSize is the size of a pointer on the current arch.
    ptrSize = unsafe.Sizeof((*byte)(nil))
)

type flag uintptr

var (
    // flagRO indicates whether the value field of a reflect.Value
    // is read-only.
    flagRO flag

    // flagAddr indicates whether the address of the reflect.Value's
    // value may be taken.
    flagAddr flag
)

// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)

// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
    ro, addr flag
}{{
    // From Go 1.4 to 1.5
    ro: 1 << 5,
    addr: 1 << 7,
}, {
    // Up to Go tip.
    ro: 1<<5 | 1<<6,
    addr: 1 << 8,
}}

var flagValOffset = func() uintptr {
    field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
    if !ok {
        panic("reflect.Value has no flag field")
    }
    return field.Offset
}()

// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
    return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}

// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
    if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
        return v
    }
    flagFieldPtr := flagField(&v)
    *flagFieldPtr &^= flagRO
    *flagFieldPtr |= flagAddr
    return v
}

// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
    field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
    if !ok {
        panic("reflect.Value has no flag field")
    }
    if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
        panic("reflect.Value flag field has changed kind")
    }
    type t0 int
    var t struct {
        A t0
        // t0 will have flagEmbedRO set.
        t0
        // a will have flagStickyRO set
        a t0
    }
    vA := reflect.ValueOf(t).FieldByName("A")
    va := reflect.ValueOf(t).FieldByName("a")
    vt0 := reflect.ValueOf(t).FieldByName("t0")

    // Infer flagRO from the difference between the flags
    // for the (otherwise identical) fields in t.
    flagPublic := *flagField(&vA)
    flagWithRO := *flagField(&va) | *flagField(&vt0)
    flagRO = flagPublic ^ flagWithRO

    // Infer flagAddr from the difference between a value
    // taken from a pointer and not.
    vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
    flagNoPtr := *flagField(&vA)
    flagPtr := *flagField(&vPtrA)
    flagAddr = flagNoPtr ^ flagPtr

    // Check that the inferred flags tally with one of the known versions.
    for _, f := range okFlags {
        if flagRO == f.ro && flagAddr == f.addr {
            return
        }
    }
    panic("reflect.Value read-only flag has changed semantics")
}
38 client/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe !go1.4

package spew

import "reflect"

const (
    // UnsafeDisabled is a build-time constant which specifies whether or
    // not access to the unsafe package is available.
    UnsafeDisabled = true
)

// unsafeReflectValue typically converts the passed reflect.Value into a one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
    return v
}
341 client/vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file
@@ -0,0 +1,341 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
    "bytes"
    "fmt"
    "io"
    "reflect"
    "sort"
    "strconv"
)

// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
    panicBytes = []byte("(PANIC=")
    plusBytes = []byte("+")
    iBytes = []byte("i")
    trueBytes = []byte("true")
    falseBytes = []byte("false")
    interfaceBytes = []byte("(interface {})")
    commaNewlineBytes = []byte(",\n")
    newlineBytes = []byte("\n")
    openBraceBytes = []byte("{")
    openBraceNewlineBytes = []byte("{\n")
    closeBraceBytes = []byte("}")
    asteriskBytes = []byte("*")
    colonBytes = []byte(":")
    colonSpaceBytes = []byte(": ")
    openParenBytes = []byte("(")
    closeParenBytes = []byte(")")
    spaceBytes = []byte(" ")
    pointerChainBytes = []byte("->")
    nilAngleBytes = []byte("<nil>")
    maxNewlineBytes = []byte("<max depth reached>\n")
    maxShortBytes = []byte("<max>")
    circularBytes = []byte("<already shown>")
    circularShortBytes = []byte("<shown>")
    invalidAngleBytes = []byte("<invalid>")
    openBracketBytes = []byte("[")
    closeBracketBytes = []byte("]")
    percentBytes = []byte("%")
    precisionBytes = []byte(".")
    openAngleBytes = []byte("<")
    closeAngleBytes = []byte(">")
    openMapBytes = []byte("map[")
    closeMapBytes = []byte("]")
    lenEqualsBytes = []byte("len=")
    capEqualsBytes = []byte("cap=")
)

// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"

// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
    if err := recover(); err != nil {
        w.Write(panicBytes)
        fmt.Fprintf(w, "%v", err)
        w.Write(closeParenBytes)
    }
}

// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputes the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
    // We need an interface to check if the type implements the error or
    // Stringer interface. However, the reflect package won't give us an
    // interface on certain things like unexported struct fields in order
    // to enforce visibility rules. We use unsafe, when it's available,
    // to bypass these restrictions since this package does not mutate the
    // values.
    if !v.CanInterface() {
        if UnsafeDisabled {
            return false
        }

        v = unsafeReflectValue(v)
    }

    // Choose whether or not to do error and Stringer interface lookups against
    // the base type or a pointer to the base type depending on settings.
    // Technically calling one of these methods with a pointer receiver can
    // mutate the value, however, types which choose to satisify an error or
    // Stringer interface with a pointer receiver should not be mutating their
    // state inside these interface methods.
    if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
        v = unsafeReflectValue(v)
    }
    if v.CanAddr() {
        v = v.Addr()
    }

    // Is it an error or Stringer?
    switch iface := v.Interface().(type) {
    case error:
        defer catchPanic(w, v)
        if cs.ContinueOnMethod {
            w.Write(openParenBytes)
            w.Write([]byte(iface.Error()))
            w.Write(closeParenBytes)
            w.Write(spaceBytes)
            return false
        }

        w.Write([]byte(iface.Error()))
        return true

    case fmt.Stringer:
        defer catchPanic(w, v)
        if cs.ContinueOnMethod {
            w.Write(openParenBytes)
            w.Write([]byte(iface.String()))
            w.Write(closeParenBytes)
            w.Write(spaceBytes)
            return false
        }
        w.Write([]byte(iface.String()))
        return true
    }
    return false
}

// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
    if val {
        w.Write(trueBytes)
    } else {
        w.Write(falseBytes)
    }
}

// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
    w.Write([]byte(strconv.FormatInt(val, base)))
}

// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
    w.Write([]byte(strconv.FormatUint(val, base)))
}

// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
    w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}

// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
    r := real(c)
    w.Write(openParenBytes)
    w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
    i := imag(c)
    if i >= 0 {
        w.Write(plusBytes)
    }
    w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
    w.Write(iBytes)
    w.Write(closeParenBytes)
}

// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
    // Null pointer.
    num := uint64(p)
    if num == 0 {
        w.Write(nilAngleBytes)
        return
    }

    // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
    buf := make([]byte, 18)

    // It's simpler to construct the hex string right to left.
    base := uint64(16)
    i := len(buf) - 1
    for num >= base {
        buf[i] = hexDigits[num%base]
        num /= base
        i--
    }
    buf[i] = hexDigits[num]

    // Add '0x' prefix.
    i--
    buf[i] = 'x'
    i--
    buf[i] = '0'

    // Strip unused leading bytes.
    buf = buf[i:]
    w.Write(buf)
}

// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
    values []reflect.Value
    strings []string // either nil or same len and values
    cs *ConfigState
}

// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
    vs := &valuesSorter{values: values, cs: cs}
    if canSortSimply(vs.values[0].Kind()) {
        return vs
    }
    if !cs.DisableMethods {
        vs.strings = make([]string, len(values))
        for i := range vs.values {
            b := bytes.Buffer{}
            if !handleMethods(cs, &b, vs.values[i]) {
                vs.strings = nil
                break
            }
            vs.strings[i] = b.String()
        }
    }
    if vs.strings == nil && cs.SpewKeys {
        vs.strings = make([]string, len(values))
        for i := range vs.values {
            vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
        }
    }
    return vs
}

// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
    // This switch parallels valueSortLess, except for the default case.
    switch kind {
    case reflect.Bool:
        return true
    case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
        return true
    case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
        return true
    case reflect.Float32, reflect.Float64:
        return true
    case reflect.String:
        return true
    case reflect.Uintptr:
        return true
    case reflect.Array:
        return true
    }
    return false
}

// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
    return len(s.values)
}

// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
    s.values[i], s.values[j] = s.values[j], s.values[i]
    if s.strings != nil {
        s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
    }
}

// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
    switch a.Kind() {
    case reflect.Bool:
        return !a.Bool() && b.Bool()
    case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
        return a.Int() < b.Int()
    case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
        return a.Uint() < b.Uint()
    case reflect.Float32, reflect.Float64:
        return a.Float() < b.Float()
    case reflect.String:
        return a.String() < b.String()
    case reflect.Uintptr:
        return a.Uint() < b.Uint()
    case reflect.Array:
        // Compare the contents of both arrays.
        l := a.Len()
        for i := 0; i < l; i++ {
            av := a.Index(i)
            bv := b.Index(i)
            if av.Interface() == bv.Interface() {
                continue
            }
            return valueSortLess(av, bv)
        }
    }
    return a.String() < b.String()
}

// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
    if s.strings == nil {
        return valueSortLess(s.values[i], s.values[j])
    }
    return s.strings[i] < s.strings[j]
}

// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
    if len(values) == 0 {
        return
    }
    sort.Sort(newValuesSorter(values, cs))
}
306 client/vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file
@@ -0,0 +1,306 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
    "bytes"
    "fmt"
    "io"
    "os"
)

// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
    // Indent specifies the string to use for each indentation level. The
    // global config instance that all top-level functions use set this to a
    // single space by default. If you would like more indentation, you might
    // set this to a tab with "\t" or perhaps two spaces with "  ".
    Indent string

    // MaxDepth controls the maximum number of levels to descend into nested
    // data structures. The default, 0, means there is no limit.
    //
    // NOTE: Circular data structures are properly detected, so it is not
    // necessary to set this value unless you specifically want to limit deeply
    // nested data structures.
    MaxDepth int

    // DisableMethods specifies whether or not error and Stringer interfaces are
    // invoked for types that implement them.
    DisableMethods bool

    // DisablePointerMethods specifies whether or not to check for and invoke
    // error and Stringer interfaces on types which only accept a pointer
    // receiver when the current type is not a pointer.
    //
    // NOTE: This might be an unsafe action since calling one of these methods
    // with a pointer receiver could technically mutate the value, however,
    // in practice, types which choose to satisify an error or Stringer
    // interface with a pointer receiver should not be mutating their state
    // inside these interface methods. As a result, this option relies on
    // access to the unsafe package, so it will not have any effect when
    // running in environments without access to the unsafe package such as
    // Google App Engine or with the "safe" build tag specified.
    DisablePointerMethods bool

    // DisablePointerAddresses specifies whether to disable the printing of
    // pointer addresses. This is useful when diffing data structures in tests.
    DisablePointerAddresses bool

    // DisableCapacities specifies whether to disable the printing of capacities
    // for arrays, slices, maps and channels. This is useful when diffing
    // data structures in tests.
    DisableCapacities bool

    // ContinueOnMethod specifies whether or not recursion should continue once
    // a custom error or Stringer interface is invoked. The default, false,
    // means it will print the results of invoking the custom error or Stringer
    // interface and return immediately instead of continuing to recurse into
    // the internals of the data type.
    //
    // NOTE: This flag does not have any effect if method invocation is disabled
    // via the DisableMethods or DisablePointerMethods options.
    ContinueOnMethod bool

    // SortKeys specifies map keys should be sorted before being printed. Use
    // this to have a more deterministic, diffable output. Note that only
    // native types (bool, int, uint, floats, uintptr and string) and types
    // that support the error or Stringer interfaces (if methods are
    // enabled) are supported, with other types sorted according to the
    // reflect.Value.String() output which guarantees display stability.
    SortKeys bool

    // SpewKeys specifies that, as a last resort attempt, map keys should
    // be spewed to strings and sorted by those strings. This is only
    // considered if SortKeys is true.
    SpewKeys bool
}

// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
    return fmt.Errorf(format, c.convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
    return fmt.Fprint(w, c.convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
    return fmt.Fprintf(w, format, c.convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
    return fmt.Fprintln(w, c.convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
    return fmt.Print(c.convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
    return fmt.Printf(format, c.convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
    return fmt.Println(c.convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
    return fmt.Sprint(c.convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
    return fmt.Sprintf(format, c.convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
    return fmt.Sprintln(c.convertArgs(a)...)
}

/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Printf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
    return newFormatter(c, v)
}

// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
    fdump(c, w, a...)
}

/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

    * Pointers are dereferenced and followed
    * Circular data structures are detected and handled properly
    * Custom Stringer/error interfaces are optionally invoked, including
      on unexported types
    * Custom types which only implement the Stringer/error interfaces via
      a pointer receiver are optionally invoked when passing non-pointer
      variables
    * Byte arrays and slices are dumped like the hexdump -C command which
      includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
    fdump(c, os.Stdout, a...)
}

// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
    var buf bytes.Buffer
    fdump(c, &buf, a...)
    return buf.String()
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
    formatters = make([]interface{}, len(args))
    for index, arg := range args {
        formatters[index] = newFormatter(c, arg)
    }
    return formatters
}

// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
    return &ConfigState{Indent: " "}
}
211 client/vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file
@@ -0,0 +1,211 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types are as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
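As a concrete illustration of the calls above, a minimal program might look
as follows (the Project type and its values are hypothetical, chosen only for
this sketch):

	package main

	import (
		"os"

		"github.com/davecgh/go-spew/spew"
	)

	// Project is a hypothetical type used only to illustrate the calls above.
	type Project struct {
		Name string
		Tags map[string]bool
	}

	func main() {
		p := &Project{Name: "example", Tags: map[string]bool{"vendored": true}}

		spew.Dump(p)             // newline/indent style with types and pointer addresses
		spew.Fdump(os.Stderr, p) // the same output, written to an arbitrary io.Writer
		out := spew.Sdump(p)     // the same output, captured as a string

		// Inline style via the convenience wrappers shown above.
		spew.Printf("p: %v -- p: %#+v\n", p, p)
		_ = out
	}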
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
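As a hedged sketch of the paragraph above (the variable names are invented
for illustration), a locally owned ConfigState can be configured once and
then used through its methods instead of the spew.Config global:

	package main

	import "github.com/davecgh/go-spew/spew"

	func main() {
		// A private ConfigState; the fields mirror the option list below.
		cfg := spew.ConfigState{
			Indent:   "\t", // tab indentation instead of the single-space default
			MaxDepth: 3,    // stop descending after three levels
			SortKeys: true, // deterministic map output, useful in tests
		}

		data := map[string]int{"b": 2, "a": 1} // hypothetical value to dump
		cfg.Dump(data)                         // ConfigState methods mirror the top-level functions
	}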
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list of convenience functions.
|
||||
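For completeness, a short hedged sketch of what the wrappers above do under
the hood (the point type and its values are hypothetical): each argument is
wrapped by NewFormatter so that the standard fmt verbs produce spew's inline
output directly.

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		type point struct{ X, Y int } // hypothetical type for illustration
		p := &point{X: 1, Y: 2}

		// Plain fmt calls, with spew's formatter supplying the %v handling.
		fmt.Printf("%v\n", spew.NewFormatter(p))
		fmt.Printf("%#+v\n", spew.NewFormatter(p))
	}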
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
509 client/vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file
@@ -0,0 +1,509 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
419 client/vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
@@ -0,0 +1,419 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
|
||||
// uses the passed reflect value to figure out what kind of object we are
|
||||
// dealing with and formats it appropriately. It is a recursive function,
|
||||
// however circular data structures are detected and handled properly.
|
||||
func (f *formatState) format(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
f.fs.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
f.formatPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write([]byte(v.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
f.ignoreNextType = false
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods
|
||||
// flag is enabled.
|
||||
if !f.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(f.fs, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(f.fs, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(f.fs, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(f.fs, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(f.fs, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(f.fs, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(f.fs, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f.fs.Write(openBracketBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.Index(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBracketBytes)
|
||||
|
||||
case reflect.String:
|
||||
f.fs.Write([]byte(v.String()))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
f.fs.Write(openMapBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
keys := v.MapKeys()
|
||||
if f.cs.SortKeys {
|
||||
sortValues(keys, f.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(key))
|
||||
f.fs.Write(colonBytes)
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.MapIndex(key)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeMapBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
148 client/vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
@@ -0,0 +1,148 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the formatted string as a value that satisfies error. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a default spew Formatter interface.
|
||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = NewFormatter(arg)
|
||||
}
|
||||
return formatters
|
||||
}
71 client/vendor/github.com/emicklei/go-restful/v3/.gitignore generated vendored Normal file
@@ -0,0 +1,71 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
restful.html
|
||||
|
||||
*.out
|
||||
|
||||
tmp.prof
|
||||
|
||||
go-restful.test
|
||||
|
||||
examples/restful-basic-authentication
|
||||
|
||||
examples/restful-encoding-filter
|
||||
|
||||
examples/restful-filters
|
||||
|
||||
examples/restful-hello-world
|
||||
|
||||
examples/restful-resource-functions
|
||||
|
||||
examples/restful-serve-static
|
||||
|
||||
examples/restful-user-service
|
||||
|
||||
*.DS_Store
|
||||
examples/restful-user-resource
|
||||
|
||||
examples/restful-multi-containers
|
||||
|
||||
examples/restful-form-handling
|
||||
|
||||
examples/restful-CORS-filter
|
||||
|
||||
examples/restful-options-filter
|
||||
|
||||
examples/restful-curly-router
|
||||
|
||||
examples/restful-cpuprofiler-service
|
||||
|
||||
examples/restful-pre-post-filters
|
||||
|
||||
curly.prof
|
||||
|
||||
examples/restful-NCSA-logging
|
||||
|
||||
examples/restful-html-template
|
||||
|
||||
s.html
|
||||
restful-path-tail
|
||||
.idea
1 client/vendor/github.com/emicklei/go-restful/v3/.goconvey generated vendored Normal file
@@ -0,0 +1 @@
ignore
13 client/vendor/github.com/emicklei/go-restful/v3/.travis.yml generated vendored Normal file
@@ -0,0 +1,13 @@
language: go

go:
- 1.x

before_install:
- go test -v

script:
- go test -race -coverprofile=coverage.txt -covermode=atomic

after_success:
- bash <(curl -s https://codecov.io/bash)
376 client/vendor/github.com/emicklei/go-restful/v3/CHANGES.md generated vendored Normal file
@@ -0,0 +1,376 @@
# Change history of go-restful
|
||||
|
||||
## [v3.9.0] - 2022-07-21
|
||||
|
||||
- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)
|
||||
|
||||
## [v3.8.0] - 2022-06-06
|
||||
|
||||
- use exact matching of allowed domain entries, issue #489 (#493)
|
||||
- this change fixes [security] Authorization Bypass Through User-Controlled Key
|
||||
by changing the behaviour of the AllowedDomains setting in the CORS filter.
|
||||
To support the previous behaviour, the CORS filter type now has an AllowedDomainFunc
|
||||
callback mechanism which is called when a simple domain match fails.
|
||||
- add test and fix for POST without body and Content-type, issue #492 (#496)
|
||||
- [Minor] Bad practice to have a mix of Receiver types. (#491)
|
||||
|
||||
## [v3.7.2] - 2021-11-24
|
||||
|
||||
- restored FilterChain (#482 by SVilgelm)
|
||||
|
||||
|
||||
## [v3.7.1] - 2021-10-04
|
||||
|
||||
- fix problem with contentEncodingEnabled setting (#479)
|
||||
|
||||
## [v3.7.0] - 2021-09-24
|
||||
|
||||
- feat(parameter): adds additional openapi mappings (#478)
|
||||
|
||||
## [v3.6.0] - 2021-09-18
|
||||
|
||||
- add support for vendor extensions (#477 thx erraggy)
|
||||
|
||||
## [v3.5.2] - 2021-07-14
|
||||
|
||||
- fix removing absent route from webservice (#472)
|
||||
|
||||
## [v3.5.1] - 2021-04-12
|
||||
|
||||
- fix handling no match access selected path
|
||||
- remove obsolete field
|
||||
|
||||
## [v3.5.0] - 2021-04-10
|
||||
|
||||
- add check for wildcard (#463) in CORS
|
||||
- add access to Route from Request, issue #459 (#462)
|
||||
|
||||
## [v3.4.0] - 2020-11-10
|
||||
|
||||
- Added OPTIONS to WebService
|
||||
|
||||
## [v3.3.2] - 2020-01-23
|
||||
|
||||
- Fixed duplicate compression in dispatch. #449
|
||||
|
||||
|
||||
## [v3.3.1] - 2020-08-31
|
||||
|
||||
- Added check on writer to prevent compression of response twice. #447
|
||||
|
||||
## [v3.3.0] - 2020-08-19
|
||||
|
||||
- Enable content encoding on Handle and ServeHTTP (#446)
|
||||
- List available representations in 406 body (#437)
|
||||
- Convert to string using rune() (#443)
|
||||
|
||||
## [v3.2.0] - 2020-06-21
|
||||
|
||||
- 405 Method Not Allowed must have Allow header (#436) (thx Bracken <abdawson@gmail.com>)
|
||||
- add field allowedMethodsWithoutContentType (#424)
|
||||
|
||||
## [v3.1.0]
|
||||
|
||||
- support describing response headers (#426)
|
||||
- fix openapi examples (#425)
|
||||
|
||||
v3.0.0
|
||||
|
||||
- fix: use request/response resulting from filter chain
|
||||
- add Go module
|
||||
Module consumer should use github.com/emicklei/go-restful/v3 as import path
|
||||
|
||||
v2.10.0
|
||||
|
||||
- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>)
|
||||
- fixed static example (thanks Arthur <yang_yapo@126.com>)
|
||||
- simplify code (thanks Christian Muehlhaeuser <muesli@gmail.com>)
|
||||
- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben <amim.knabben@gmail.com>)
|
||||
|
||||
v2.9.6
|
||||
|
||||
- small optimization in filter code
|
||||
|
||||
v2.11.1
|
||||
|
||||
- fix WriteError return value (#415)
|
||||
|
||||
v2.11.0
|
||||
|
||||
- allow prefix and suffix in path variable expression (#414)
|
||||
|
||||
v2.9.6
|
||||
|
||||
- support google custom verb (#413)
|
||||
|
||||
v2.9.5
|
||||
|
||||
- fix panic in Response.WriteError if err == nil
|
||||
|
||||
v2.9.4
|
||||
|
||||
- fix issue #400 , parsing mime type quality
|
||||
- Route Builder added option for contentEncodingEnabled (#398)
|
||||
|
||||
v2.9.3
|
||||
|
||||
- Avoid return of 415 Unsupported Media Type when request body is empty (#396)
|
||||
|
||||
v2.9.2
|
||||
|
||||
- Reduce allocations in per-request methods to improve performance (#395)
|
||||
|
||||
v2.9.1
|
||||
|
||||
- Fix issue with default responses and invalid status code 0. (#393)
|
||||
|
||||
v2.9.0
|
||||
|
||||
- add per Route content encoding setting (overrides container setting)
|
||||
|
||||
v2.8.0
|
||||
|
||||
- add Request.QueryParameters()
|
||||
- add json-iterator (via build tag)
|
||||
- disable vgo module (until log is moved)
|
||||
|
||||
v2.7.1
|
||||
|
||||
- add vgo module
|
||||
|
||||
v2.6.1
|
||||
|
||||
- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+)
|
||||
|
||||
v2.6.0
|
||||
|
||||
- Make JSR 311 routing and path param processing consistent
|
||||
- Adding description to RouteBuilder.Reads()
|
||||
- Update example for Swagger12 and OpenAPI
|
||||
|
||||
2017-09-13
|
||||
|
||||
- added route condition functions using `.If(func)` in route building.
|
||||
|
||||
2017-02-16
|
||||
|
||||
- solved issue #304, make operation names unique
|
||||
|
||||
2017-01-30
|
||||
|
||||
[IMPORTANT] For swagger users, change your import statement to:
|
||||
swagger "github.com/emicklei/go-restful-swagger12"
|
||||
|
||||
- moved swagger 1.2 code to go-restful-swagger12
|
||||
- created TAG 2.0.0
|
||||
|
||||
2017-01-27
|
||||
|
||||
- remove defer request body close
|
||||
- expose Dispatch for testing filters and Routefunctions
|
||||
- swagger response model cannot be array
|
||||
- created TAG 1.0.0
|
||||
|
||||
2016-12-22
|
||||
|
||||
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
|
||||
|
||||
2016-11-26
|
||||
|
||||
- Default change! now use CurlyRouter (was RouterJSR311)
|
||||
- Default change! no more caching of request content
|
||||
- Default change! do not recover from panics
|
||||
|
||||
2016-09-22
|
||||
|
||||
- fix the DefaultRequestContentType feature
|
||||
|
||||
2016-02-14
|
||||
|
||||
- take the quality factor of the Accept header mediatype into account when deciding the content type of the response
|
||||
- add constructors for custom entity accessors for xml and json
|
||||
|
||||
2015-09-27
|
||||
|
||||
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
|
||||
|
||||
2015-09-25
|
||||
|
||||
- fixed problem with changing Header after WriteHeader (issue 235)
|
||||
|
||||
2015-09-14
|
||||
|
||||
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
|
||||
- added support for custom EntityReaderWriters.
|
||||
|
||||
2015-08-06
|
||||
|
||||
- add support for reading entities from compressed request content
|
||||
- use sync.Pool for compressors of http response and request body
|
||||
- add Description to Parameter for documentation in Swagger UI
|
||||
|
||||
2015-03-20
|
||||
|
||||
- add configurable logging
|
||||
|
||||
2015-03-18
|
||||
|
||||
- if not specified, the Operation is derived from the Route function
|
||||
|
||||
2015-03-17
|
||||
|
||||
- expose Parameter creation functions
|
||||
- make trace logger an interface
|
||||
- fix OPTIONSFilter
|
||||
- customize rendering of ServiceError
|
||||
- JSR311 router now handles wildcards
|
||||
- add Notes to Route
|
||||
|
||||
2014-11-27
|
||||
|
||||
- (api add) PrettyPrint per response. (as proposed in #167)
|
||||
|
||||
2014-11-12
|
||||
|
||||
- (api add) ApiVersion(.) for documentation in Swagger UI
|
||||
|
||||
2014-11-10
|
||||
|
||||
- (api change) struct fields tagged with "description" show up in Swagger UI
|
||||
|
||||
2014-10-31
|
||||
|
||||
- (api change) ReturnsError -> Returns
|
||||
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
|
||||
- fix swagger nested structs
|
||||
- sort Swagger response messages by code
|
||||
|
||||
2014-10-23
|
||||
|
||||
- (api add) ReturnsError allows you to document Http codes in swagger
|
||||
- fixed problem with greedy CurlyRouter
|
||||
- (api add) Access-Control-Max-Age in CORS
|
||||
- add tracing functionality (injectable) for debugging purposes
|
||||
- support JSON parse 64bit int
|
||||
- fix empty parameters for swagger
|
||||
- WebServicesUrl is now optional for swagger
|
||||
- fixed duplicate AccessControlAllowOrigin in CORS
|
||||
- (api change) expose ServeMux in container
|
||||
- (api add) added AllowedDomains in CORS
|
||||
- (api add) ParameterNamed for detailed documentation
|
||||
|
||||
2014-04-16
|
||||
|
||||
- (api add) expose constructor of Request for testing.
|
||||
|
||||
2014-06-27
|
||||
|
||||
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
|
||||
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
|
||||
|
||||
2014-07-03
|
||||
|
||||
- (api add) CORS can be configured with a list of allowed domains
|
||||
|
||||
2014-03-12
|
||||
|
||||
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
|
||||
|
||||
2014-02-26
|
||||
|
||||
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
|
||||
|
||||
2014-02-17
|
||||
|
||||
- (api change) renamed parameter constants (go-lint checks)
|
||||
|
||||
2014-01-10
|
||||
|
||||
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
|
||||
|
||||
2014-01-07
|
||||
|
||||
- (api change) Write* methods in Response now return the error or nil.
|
||||
- added example of serving HTML from a Go template.
|
||||
- fixed comparing Allowed headers in CORS (is now case-insensitive)
|
||||
|
||||
2013-11-13
|
||||
|
||||
- (api add) Response knows how many bytes are written to the response body.
|
||||
|
||||
2013-10-29
|
||||
|
||||
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
|
||||
|
||||
2013-10-04
|
||||
|
||||
- (api add) Response knows what HTTP status has been written
|
||||
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
|
||||
|
||||
2013-09-12
|
||||
|
||||
- (api change) Router interface simplified
|
||||
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
|
||||
|
||||
2013-08-05
|
||||
- add OPTIONS support
|
||||
- add CORS support
|
||||
|
||||
2013-08-27
|
||||
|
||||
- fixed some reported issues (see github)
|
||||
- (api change) deprecated use of WriteError; use WriteErrorString instead
|
||||
|
||||
2014-04-15
|
||||
|
||||
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
|
||||
|
||||
2013-08-08
|
||||
|
||||
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
|
||||
- (api add) the swagger package has be extended to have a UI per container.
|
||||
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
|
||||
- (api add) WriteErrorString to Response
|
||||
|
||||
Important API changes:
|
||||
|
||||
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
|
||||
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
|
||||
|
||||
|
||||
2013-07-06
|
||||
|
||||
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
|
||||
|
||||
2013-06-19
|
||||
|
||||
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
|
||||
|
||||
2013-06-03
|
||||
|
||||
- (api change) removed Dispatcher interface, hide PathExpression
|
||||
- changed receiver names of type functions to be more idiomatic Go
|
||||
|
||||
2013-06-02
|
||||
|
||||
- (optimize) Cache the RegExp compilation of Paths.
|
||||
|
||||
2013-05-22
|
||||
|
||||
- (api add) Added support for request/response filter functions
|
||||
|
||||
2013-05-18
|
||||
|
||||
|
||||
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
|
||||
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
|
||||
|
||||
[2012-11-14 .. 2013-05-18>
|
||||
|
||||
- See https://github.com/emicklei/go-restful/commits
|
||||
|
||||
2012-11-14
|
||||
|
||||
- Initial commit
|
||||
|
||||
|
client/vendor/github.com/emicklei/go-restful/v3/LICENSE (generated, vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
Copyright (c) 2012,2013 Ernest Micklei

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
client/vendor/github.com/emicklei/go-restful/v3/Makefile (generated, vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
all: test

test:
	go vet .
	go test -cover -v .

ex:
	find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {}
client/vendor/github.com/emicklei/go-restful/v3/README.md (generated, vendored, new file, 111 lines)
@@ -0,0 +1,111 @@
go-restful
==========
package for building REST-style Web Services using Google Go

[Build Status](https://travis-ci.org/emicklei/go-restful)
[Go Report Card](https://goreportcard.com/report/github.com/emicklei/go-restful)
[GoDoc](https://pkg.go.dev/github.com/emicklei/go-restful)
[codecov](https://codecov.io/gh/emicklei/go-restful)

- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)

REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:

- GET = Retrieve a representation of a resource
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
- PUT = Create if you are sending the full content of the specified resource (URI).
- PUT = Update if you are updating the full content of the specified resource.
- DELETE = Delete if you are requesting the server to delete the resource
- PATCH = Update partial content of a resource
- OPTIONS = Get information about the communication options for the request URI

### Usage

#### Without Go Modules

All versions up to `v2.*.*` (on the master branch) do not support Go modules.

```
import (
	restful "github.com/emicklei/go-restful"
)
```

#### Using Go Modules

As of version `v3.0.0` (on the v3 branch), this package supports Go modules.

```
import (
	restful "github.com/emicklei/go-restful/v3"
)
```

### Example

```Go
ws := new(restful.WebService)
ws.
	Path("/users").
	Consumes(restful.MIME_XML, restful.MIME_JSON).
	Produces(restful.MIME_JSON, restful.MIME_XML)

ws.Route(ws.GET("/{user-id}").To(u.findUser).
	Doc("get a user").
	Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
	Writes(User{}))
...

func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	...
}
```

[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go)
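The example above elides how the `WebService` is registered and served. A minimal sketch of that wiring follows; it is illustrative only, the `UserResource` stub and the `:8080` address are stand-ins for the linked example, and it relies on `restful.Add` registering the service with the default container (which serves on `http.DefaultServeMux`).

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// UserResource is a stand-in for the type used in the linked example.
type UserResource struct{}

func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	// Echo the path parameter back as JSON.
	response.WriteAsJson(map[string]string{"id": id})
}

func main() {
	u := UserResource{}
	ws := new(restful.WebService)
	ws.Path("/users").Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/{user-id}").To(u.findUser))

	// Add registers the WebService with restful.DefaultContainer,
	// which is wired to http.DefaultServeMux.
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```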
### Features

- Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support
- Configurable router:
  - (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. /resource/name:customVerb, /meetings/{id} or /static/{subpath:*})
  - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
- Response API for writing structs to JSON/XML and setting headers
- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request → response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip, deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable gzip/deflate readers and writers using CompressorProvider registration
- Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function

## How to customize

There are several hooks to customize the behavior of the go-restful package; a short sketch follows the list below.

- Router algorithm
- Panic recovery
- JSON decoder
- Trace logging
- Compression
- Encoders for other serializers
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
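A brief sketch of what a few of these hooks look like in code. It uses only APIs that appear in the vendored sources added in this change (router selection, compressor provider, content encoding); the chosen capacities are arbitrary and the whole block is illustrative, not part of the diff.

```go
package main

import (
	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	// Swap the default CurlyRouter for the JSR311 router (see curly.go / jsr311.go).
	restful.DefaultContainer.Router(restful.RouterJSR311{})

	// Replace the sync.Pool based compressors with a bounded cache of
	// 20 writers and 20 readers (see compressors.go and compressor_cache.go).
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))

	// Opt in to gzip/deflate response encoding (disabled by default).
	restful.DefaultContainer.EnableContentEncoding(true)
}
```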
## Resources

- [Example programs](./examples)
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)

Type ```git shortlog -s``` for a full list of contributors.

© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome.
client/vendor/github.com/emicklei/go-restful/v3/SECURITY.md (generated, vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
# Security Policy

## Supported Versions

| Version  | Supported          |
| -------- | ------------------ |
| v3.7.x   | :white_check_mark: |
| < v3.0.1 | :x:                |

## Reporting a Vulnerability

Create an Issue and put the label `[security]` in the title of the issue.
Valid reported security issues are expected to be solved within a week.
client/vendor/github.com/emicklei/go-restful/v3/Srcfile (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"SkipDirs": ["examples"]}
client/vendor/github.com/emicklei/go-restful/v3/bench_test.sh (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out

go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly

#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof
client/vendor/github.com/emicklei/go-restful/v3/compress.go (generated, vendored, new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
|
||||
var EnableContentEncoding = false
|
||||
|
||||
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
|
||||
type CompressingResponseWriter struct {
|
||||
writer http.ResponseWriter
|
||||
compressor io.WriteCloser
|
||||
encoding string
|
||||
}
|
||||
|
||||
// Header is part of http.ResponseWriter interface
|
||||
func (c *CompressingResponseWriter) Header() http.Header {
|
||||
return c.writer.Header()
|
||||
}
|
||||
|
||||
// WriteHeader is part of http.ResponseWriter interface
|
||||
func (c *CompressingResponseWriter) WriteHeader(status int) {
|
||||
c.writer.WriteHeader(status)
|
||||
}
|
||||
|
||||
// Write is part of http.ResponseWriter interface
|
||||
// It is passed through the compressor
|
||||
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
|
||||
if c.isCompressorClosed() {
|
||||
return -1, errors.New("Compressing error: tried to write data using closed compressor")
|
||||
}
|
||||
return c.compressor.Write(bytes)
|
||||
}
|
||||
|
||||
// CloseNotify is part of http.CloseNotifier interface
|
||||
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
|
||||
return c.writer.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// Close the underlying compressor
|
||||
func (c *CompressingResponseWriter) Close() error {
|
||||
if c.isCompressorClosed() {
|
||||
return errors.New("Compressing error: tried to close already closed compressor")
|
||||
}
|
||||
|
||||
c.compressor.Close()
|
||||
if ENCODING_GZIP == c.encoding {
|
||||
currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
|
||||
}
|
||||
if ENCODING_DEFLATE == c.encoding {
|
||||
currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
|
||||
}
|
||||
// gc hint needed?
|
||||
c.compressor = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CompressingResponseWriter) isCompressorClosed() bool {
|
||||
return nil == c.compressor
|
||||
}
|
||||
|
||||
// Hijack implements the Hijacker interface
|
||||
// This is especially useful when using Container.EnableContentEncoding
|
||||
// in combination with websockets (for instance gorilla/websocket)
|
||||
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
hijacker, ok := c.writer.(http.Hijacker)
|
||||
if !ok {
|
||||
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
|
||||
}
|
||||
return hijacker.Hijack()
|
||||
}
|
||||
|
||||
// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
|
||||
// It also inspects the httpWriter whether its content-encoding is already set (non-empty).
|
||||
func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) {
|
||||
if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" {
|
||||
return false, ""
|
||||
}
|
||||
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
|
||||
gi := strings.Index(header, ENCODING_GZIP)
|
||||
zi := strings.Index(header, ENCODING_DEFLATE)
|
||||
// use in order of appearance
|
||||
if gi == -1 {
|
||||
return zi != -1, ENCODING_DEFLATE
|
||||
} else if zi == -1 {
|
||||
return gi != -1, ENCODING_GZIP
|
||||
} else {
|
||||
if gi < zi {
|
||||
return true, ENCODING_GZIP
|
||||
}
|
||||
return true, ENCODING_DEFLATE
|
||||
}
|
||||
}
|
||||
|
||||
// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
|
||||
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
|
||||
httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
|
||||
c := new(CompressingResponseWriter)
|
||||
c.writer = httpWriter
|
||||
var err error
|
||||
if ENCODING_GZIP == encoding {
|
||||
w := currentCompressorProvider.AcquireGzipWriter()
|
||||
w.Reset(httpWriter)
|
||||
c.compressor = w
|
||||
c.encoding = ENCODING_GZIP
|
||||
} else if ENCODING_DEFLATE == encoding {
|
||||
w := currentCompressorProvider.AcquireZlibWriter()
|
||||
w.Reset(httpWriter)
|
||||
c.compressor = w
|
||||
c.encoding = ENCODING_DEFLATE
|
||||
} else {
|
||||
return nil, errors.New("Unknown encoding:" + encoding)
|
||||
}
|
||||
return c, err
|
||||
}
|
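For illustration only (not part of this change): a sketch of how the exported pieces of compress.go could wrap an arbitrary http.Handler. go-restful normally performs this negotiation itself once EnableContentEncoding(true) is set; the wrapper name, handler and address below are hypothetical, and the Accept-Encoding check is deliberately simpler than wantsCompressedResponse.

```go
package main

import (
	"io"
	"log"
	"net/http"
	"strings"

	restful "github.com/emicklei/go-restful/v3"
)

// gzipIfRequested (hypothetical helper) wraps a plain http.Handler with the
// package's CompressingResponseWriter when the client accepts gzip.
func gzipIfRequested(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.Contains(r.Header.Get(restful.HEADER_AcceptEncoding), restful.ENCODING_GZIP) {
			next.ServeHTTP(w, r)
			return
		}
		cw, err := restful.NewCompressingResponseWriter(w, restful.ENCODING_GZIP)
		if err != nil {
			next.ServeHTTP(w, r)
			return
		}
		defer cw.Close() // flushes and releases the pooled *gzip.Writer
		next.ServeHTTP(cw, r)
	})
}

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello")
	})
	log.Fatal(http.ListenAndServe(":8080", gzipIfRequested(hello)))
}
```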
client/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go (generated, vendored, new file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
)
|
||||
|
||||
// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
|
||||
// of writers and readers (resources).
|
||||
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
|
||||
type BoundedCachedCompressors struct {
|
||||
gzipWriters chan *gzip.Writer
|
||||
gzipReaders chan *gzip.Reader
|
||||
zlibWriters chan *zlib.Writer
|
||||
writersCapacity int
|
||||
readersCapacity int
|
||||
}
|
||||
|
||||
// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a filled cache.
|
||||
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
|
||||
b := &BoundedCachedCompressors{
|
||||
gzipWriters: make(chan *gzip.Writer, writersCapacity),
|
||||
gzipReaders: make(chan *gzip.Reader, readersCapacity),
|
||||
zlibWriters: make(chan *zlib.Writer, writersCapacity),
|
||||
writersCapacity: writersCapacity,
|
||||
readersCapacity: readersCapacity,
|
||||
}
|
||||
for ix := 0; ix < writersCapacity; ix++ {
|
||||
b.gzipWriters <- newGzipWriter()
|
||||
b.zlibWriters <- newZlibWriter()
|
||||
}
|
||||
for ix := 0; ix < readersCapacity; ix++ {
|
||||
b.gzipReaders <- newGzipReader()
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
|
||||
var writer *gzip.Writer
|
||||
select {
|
||||
case writer, _ = <-b.gzipWriters:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
writer = newGzipWriter()
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.gzipWriters) < b.writersCapacity {
|
||||
b.gzipWriters <- w
|
||||
}
|
||||
}
|
||||
|
||||
// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
|
||||
var reader *gzip.Reader
|
||||
select {
|
||||
case reader, _ = <-b.gzipReaders:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
reader = newGzipReader()
|
||||
}
|
||||
return reader
|
||||
}
|
||||
|
||||
// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.gzipReaders) < b.readersCapacity {
|
||||
b.gzipReaders <- r
|
||||
}
|
||||
}
|
||||
|
||||
// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
|
||||
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
|
||||
var writer *zlib.Writer
|
||||
select {
|
||||
case writer, _ = <-b.zlibWriters:
|
||||
default:
|
||||
// return a new unmanaged one
|
||||
writer = newZlibWriter()
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
|
||||
// only when the cache has room for it. It will ignore it otherwise.
|
||||
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
|
||||
// forget the unmanaged ones
|
||||
if len(b.zlibWriters) < b.writersCapacity {
|
||||
b.zlibWriters <- w
|
||||
}
|
||||
}
|
client/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go (generated, vendored, new file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
|
||||
type SyncPoolCompessors struct {
|
||||
GzipWriterPool *sync.Pool
|
||||
GzipReaderPool *sync.Pool
|
||||
ZlibWriterPool *sync.Pool
|
||||
}
|
||||
|
||||
// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
|
||||
func NewSyncPoolCompessors() *SyncPoolCompessors {
|
||||
return &SyncPoolCompessors{
|
||||
GzipWriterPool: &sync.Pool{
|
||||
New: func() interface{} { return newGzipWriter() },
|
||||
},
|
||||
GzipReaderPool: &sync.Pool{
|
||||
New: func() interface{} { return newGzipReader() },
|
||||
},
|
||||
ZlibWriterPool: &sync.Pool{
|
||||
New: func() interface{} { return newZlibWriter() },
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
|
||||
return s.GzipWriterPool.Get().(*gzip.Writer)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
|
||||
s.GzipWriterPool.Put(w)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
|
||||
return s.GzipReaderPool.Get().(*gzip.Reader)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
|
||||
s.GzipReaderPool.Put(r)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
|
||||
return s.ZlibWriterPool.Get().(*zlib.Writer)
|
||||
}
|
||||
|
||||
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
|
||||
s.ZlibWriterPool.Put(w)
|
||||
}
|
||||
|
||||
func newGzipWriter() *gzip.Writer {
|
||||
// create with an empty bytes writer; it will be replaced before using the gzipWriter
|
||||
writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return writer
|
||||
}
|
||||
|
||||
func newGzipReader() *gzip.Reader {
|
||||
// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
|
||||
// we can safely use currentCompressProvider because it is set on package initialization.
|
||||
w := currentCompressorProvider.AcquireGzipWriter()
|
||||
defer currentCompressorProvider.ReleaseGzipWriter(w)
|
||||
b := new(bytes.Buffer)
|
||||
w.Reset(b)
|
||||
w.Flush()
|
||||
w.Close()
|
||||
reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return reader
|
||||
}
|
||||
|
||||
func newZlibWriter() *zlib.Writer {
|
||||
writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return writer
|
||||
}
|
client/vendor/github.com/emicklei/go-restful/v3/compressors.go (generated, vendored, new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
)
|
||||
|
||||
// CompressorProvider describes a component that can provide compressors for the std methods.
|
||||
type CompressorProvider interface {
|
||||
// Returns a *gzip.Writer which needs to be released later.
|
||||
// Before using it, call Reset().
|
||||
AcquireGzipWriter() *gzip.Writer
|
||||
|
||||
// Releases an acquired *gzip.Writer.
|
||||
ReleaseGzipWriter(w *gzip.Writer)
|
||||
|
||||
// Returns a *gzip.Reader which needs to be released later.
|
||||
AcquireGzipReader() *gzip.Reader
|
||||
|
||||
// Releases an acquired *gzip.Reader.
|
||||
ReleaseGzipReader(w *gzip.Reader)
|
||||
|
||||
// Returns a *zlib.Writer which needs to be released later.
|
||||
// Before using it, call Reset().
|
||||
AcquireZlibWriter() *zlib.Writer
|
||||
|
||||
// Releases an acquired *zlib.Writer.
|
||||
ReleaseZlibWriter(w *zlib.Writer)
|
||||
}
|
||||
|
||||
// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip).
|
||||
var currentCompressorProvider CompressorProvider
|
||||
|
||||
func init() {
|
||||
currentCompressorProvider = NewSyncPoolCompessors()
|
||||
}
|
||||
|
||||
// CurrentCompressorProvider returns the current CompressorProvider.
|
||||
// It is initialized using a SyncPoolCompessors.
|
||||
func CurrentCompressorProvider() CompressorProvider {
|
||||
return currentCompressorProvider
|
||||
}
|
||||
|
||||
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
|
||||
func SetCompressorProvider(p CompressorProvider) {
|
||||
if p == nil {
|
||||
panic("cannot set compressor provider to nil")
|
||||
}
|
||||
currentCompressorProvider = p
|
||||
}
|
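As a usage note (not part of the diff): a small sketch of the acquire/release contract the CompressorProvider interface defines, compressing into a buffer with the pooled writer. Purely illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	var buf bytes.Buffer

	// Borrow a pooled *gzip.Writer, point it at our buffer, use it, return it.
	zw := restful.CurrentCompressorProvider().AcquireGzipWriter()
	zw.Reset(&buf)
	if _, err := zw.Write([]byte("hello, compressed world")); err != nil {
		panic(err)
	}
	zw.Close() // flush the gzip stream before releasing
	restful.CurrentCompressorProvider().ReleaseGzipWriter(zw)

	fmt.Printf("wrote %d compressed bytes\n", buf.Len())
}
```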
client/vendor/github.com/emicklei/go-restful/v3/constants.go (generated, vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
const (
|
||||
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
|
||||
|
||||
HEADER_Allow = "Allow"
|
||||
HEADER_Accept = "Accept"
|
||||
HEADER_Origin = "Origin"
|
||||
HEADER_ContentType = "Content-Type"
|
||||
HEADER_LastModified = "Last-Modified"
|
||||
HEADER_AcceptEncoding = "Accept-Encoding"
|
||||
HEADER_ContentEncoding = "Content-Encoding"
|
||||
HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
|
||||
HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
|
||||
HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
|
||||
HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
|
||||
HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
|
||||
HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
|
||||
HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
|
||||
HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
|
||||
|
||||
ENCODING_GZIP = "gzip"
|
||||
ENCODING_DEFLATE = "deflate"
|
||||
)
|
client/vendor/github.com/emicklei/go-restful/v3/container.go (generated, vendored, new file, 450 lines)
@@ -0,0 +1,450 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/emicklei/go-restful/v3/log"
|
||||
)
|
||||
|
||||
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
|
||||
// The requests are further dispatched to routes of WebServices using a RouteSelector
|
||||
type Container struct {
|
||||
webServicesLock sync.RWMutex
|
||||
webServices []*WebService
|
||||
ServeMux *http.ServeMux
|
||||
isRegisteredOnRoot bool
|
||||
containerFilters []FilterFunction
|
||||
doNotRecover bool // default is true
|
||||
recoverHandleFunc RecoverHandleFunction
|
||||
serviceErrorHandleFunc ServiceErrorHandleFunction
|
||||
router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
|
||||
contentEncodingEnabled bool // default is false
|
||||
}
|
||||
|
||||
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
|
||||
func NewContainer() *Container {
|
||||
return &Container{
|
||||
webServices: []*WebService{},
|
||||
ServeMux: http.NewServeMux(),
|
||||
isRegisteredOnRoot: false,
|
||||
containerFilters: []FilterFunction{},
|
||||
doNotRecover: true,
|
||||
recoverHandleFunc: logStackOnRecover,
|
||||
serviceErrorHandleFunc: writeServiceError,
|
||||
router: CurlyRouter{},
|
||||
contentEncodingEnabled: false}
|
||||
}
|
||||
|
||||
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
|
||||
// The first argument is what recover() returns. The second must be used to communicate an error response.
|
||||
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
|
||||
|
||||
// RecoverHandler changes the default function (logStackOnRecover) to be called
|
||||
// when a panic is detected. DoNotRecover must be set to false for this handler to be used.
|
||||
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
|
||||
c.recoverHandleFunc = handler
|
||||
}
|
||||
|
||||
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
|
||||
// The first argument is the service error, the second is the request that resulted in the error and
|
||||
// the third must be used to communicate an error response.
|
||||
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
|
||||
|
||||
// ServiceErrorHandler changes the default function (writeServiceError) to be called
|
||||
// when a ServiceError is detected.
|
||||
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
|
||||
c.serviceErrorHandleFunc = handler
|
||||
}
|
||||
|
||||
// DoNotRecover controls whether panics will be caught to return HTTP 500.
|
||||
// If set to true, Route functions are responsible for handling any error situation.
|
||||
// Default value is true.
|
||||
func (c *Container) DoNotRecover(doNot bool) {
|
||||
c.doNotRecover = doNot
|
||||
}
|
||||
|
||||
// Router changes the default Router (currently CurlyRouter)
|
||||
func (c *Container) Router(aRouter RouteSelector) {
|
||||
c.router = aRouter
|
||||
}
|
||||
|
||||
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
|
||||
func (c *Container) EnableContentEncoding(enabled bool) {
|
||||
c.contentEncodingEnabled = enabled
|
||||
}
|
||||
|
||||
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
|
||||
func (c *Container) Add(service *WebService) *Container {
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
|
||||
// if rootPath was not set then lazy initialize it
|
||||
if len(service.rootPath) == 0 {
|
||||
service.Path("/")
|
||||
}
|
||||
|
||||
// cannot have duplicate root paths
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
log.Printf("WebService with duplicate root path detected:['%v']", each)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// If not registered on root then add specific mapping
|
||||
if !c.isRegisteredOnRoot {
|
||||
c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
|
||||
}
|
||||
c.webServices = append(c.webServices, service)
|
||||
return c
|
||||
}
|
||||
|
||||
// addHandler may set a new HandleFunc for the serveMux
|
||||
// this function must run inside the critical region protected by the webServicesLock.
|
||||
// returns true if the function was registered on root ("/")
|
||||
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
|
||||
pattern := fixedPrefixPath(service.RootPath())
|
||||
// check if root path registration is needed
|
||||
if "/" == pattern || "" == pattern {
|
||||
serveMux.HandleFunc("/", c.dispatch)
|
||||
return true
|
||||
}
|
||||
// detect if registration already exists
|
||||
alreadyMapped := false
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
alreadyMapped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !alreadyMapped {
|
||||
serveMux.HandleFunc(pattern, c.dispatch)
|
||||
if !strings.HasSuffix(pattern, "/") {
|
||||
serveMux.HandleFunc(pattern+"/", c.dispatch)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *Container) Remove(ws *WebService) error {
|
||||
if c.ServeMux == http.DefaultServeMux {
|
||||
errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
|
||||
log.Print(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
// build a new ServeMux and re-register all WebServices
|
||||
newServeMux := http.NewServeMux()
|
||||
newServices := []*WebService{}
|
||||
newIsRegisteredOnRoot := false
|
||||
for _, each := range c.webServices {
|
||||
if each.rootPath != ws.rootPath {
|
||||
// If not registered on root then add specific mapping
|
||||
if !newIsRegisteredOnRoot {
|
||||
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
|
||||
}
|
||||
newServices = append(newServices, each)
|
||||
}
|
||||
}
|
||||
c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
|
||||
return nil
|
||||
}
|
||||
|
||||
// logStackOnRecover is the default RecoverHandleFunction and is called
|
||||
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
|
||||
// Default implementation logs the stacktrace and writes the stacktrace on the response.
|
||||
// This may be a security issue as it exposes sourcecode information.
|
||||
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))
|
||||
for i := 2; ; i += 1 {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
|
||||
}
|
||||
log.Print(buffer.String())
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
httpWriter.Write(buffer.Bytes())
|
||||
}
|
||||
|
||||
// writeServiceError is the default ServiceErrorHandleFunction and is called
|
||||
// when a ServiceError is returned during route selection. Default implementation
|
||||
// calls resp.WriteErrorString(err.Code, err.Message)
|
||||
func writeServiceError(err ServiceError, req *Request, resp *Response) {
|
||||
for header, values := range err.Header {
|
||||
for _, value := range values {
|
||||
resp.Header().Add(header, value)
|
||||
}
|
||||
}
|
||||
resp.WriteErrorString(err.Code, err.Message)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
if httpWriter == nil {
|
||||
panic("httpWriter cannot be nil")
|
||||
}
|
||||
if httpRequest == nil {
|
||||
panic("httpRequest cannot be nil")
|
||||
}
|
||||
c.dispatch(httpWriter, httpRequest)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
// so we can assign a compressing one later
|
||||
writer := httpWriter
|
||||
|
||||
// CompressingResponseWriter should be closed after all operations are done
|
||||
defer func() {
|
||||
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
|
||||
compressWriter.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Install panic recovery unless told otherwise
|
||||
if !c.doNotRecover { // catch all for 500 response
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
c.recoverHandleFunc(r, writer)
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Find best match Route ; err is non nil if no match was found
|
||||
var webService *WebService
|
||||
var route *Route
|
||||
var err error
|
||||
func() {
|
||||
c.webServicesLock.RLock()
|
||||
defer c.webServicesLock.RUnlock()
|
||||
webService, route, err = c.router.SelectRoute(
|
||||
c.webServices,
|
||||
httpRequest)
|
||||
}()
|
||||
if err != nil {
|
||||
// a non-200 response (may be compressed) has already been written
|
||||
// run container filters anyway ; they should not touch the response...
|
||||
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
|
||||
switch err.(type) {
|
||||
case ServiceError:
|
||||
ser := err.(ServiceError)
|
||||
c.serviceErrorHandleFunc(ser, req, resp)
|
||||
}
|
||||
// TODO
|
||||
}}
|
||||
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
|
||||
return
|
||||
}
|
||||
|
||||
// Unless httpWriter is already a CompressingResponseWriter, see if we need to install one
|
||||
if _, isCompressing := httpWriter.(*CompressingResponseWriter); !isCompressing {
|
||||
// Detect if compression is needed
|
||||
// assume without compression, test for override
|
||||
contentEncodingEnabled := c.contentEncodingEnabled
|
||||
if route != nil && route.contentEncodingEnabled != nil {
|
||||
contentEncodingEnabled = *route.contentEncodingEnabled
|
||||
}
|
||||
if contentEncodingEnabled {
|
||||
doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter)
|
||||
if doCompress {
|
||||
var err error
|
||||
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
|
||||
if err != nil {
|
||||
log.Print("unable to install compressor: ", err)
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pathProcessor, routerProcessesPath := c.router.(PathProcessor)
|
||||
if !routerProcessesPath {
|
||||
pathProcessor = defaultPathProcessor{}
|
||||
}
|
||||
pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path)
|
||||
wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams)
|
||||
// pass through filters (if any)
|
||||
if size := len(c.containerFilters) + len(webService.filters) + len(route.Filters); size > 0 {
|
||||
// compose filter chain
|
||||
allFilters := make([]FilterFunction, 0, size)
|
||||
allFilters = append(allFilters, c.containerFilters...)
|
||||
allFilters = append(allFilters, webService.filters...)
|
||||
allFilters = append(allFilters, route.Filters...)
|
||||
chain := FilterChain{
|
||||
Filters: allFilters,
|
||||
Target: route.Function,
|
||||
ParameterDocs: route.ParameterDocs,
|
||||
Operation: route.Operation,
|
||||
}
|
||||
chain.ProcessFilter(wrappedRequest, wrappedResponse)
|
||||
} else {
|
||||
// no filters, handle request by route
|
||||
route.Function(wrappedRequest, wrappedResponse)
|
||||
}
|
||||
}
|
||||
|
||||
// fixedPrefixPath returns the fixed part of the pathspec ; it may include template vars {}
|
||||
func fixedPrefixPath(pathspec string) string {
|
||||
varBegin := strings.Index(pathspec, "{")
|
||||
if -1 == varBegin {
|
||||
return pathspec
|
||||
}
|
||||
return pathspec[:varBegin]
|
||||
}
|
||||
|
||||
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
|
||||
func (c *Container) ServeHTTP(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
// Skip, if content encoding is disabled
|
||||
if !c.contentEncodingEnabled {
|
||||
c.ServeMux.ServeHTTP(httpWriter, httpRequest)
|
||||
return
|
||||
}
|
||||
// content encoding is enabled
|
||||
|
||||
// Skip, if httpWriter is already a CompressingResponseWriter
|
||||
if _, ok := httpWriter.(*CompressingResponseWriter); ok {
|
||||
c.ServeMux.ServeHTTP(httpWriter, httpRequest)
|
||||
return
|
||||
}
|
||||
|
||||
writer := httpWriter
|
||||
// CompressingResponseWriter should be closed after all operations are done
|
||||
defer func() {
|
||||
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
|
||||
compressWriter.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter)
|
||||
if doCompress {
|
||||
var err error
|
||||
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
|
||||
if err != nil {
|
||||
log.Print("unable to install compressor: ", err)
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.ServeMux.ServeHTTP(writer, httpRequest)
|
||||
}
|
||||
|
||||
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
|
||||
func (c *Container) Handle(pattern string, handler http.Handler) {
|
||||
c.ServeMux.Handle(pattern, http.HandlerFunc(func(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
// Skip, if httpWriter is already a CompressingResponseWriter
|
||||
if _, ok := httpWriter.(*CompressingResponseWriter); ok {
|
||||
handler.ServeHTTP(httpWriter, httpRequest)
|
||||
return
|
||||
}
|
||||
|
||||
writer := httpWriter
|
||||
|
||||
// CompressingResponseWriter should be closed after all operations are done
|
||||
defer func() {
|
||||
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
|
||||
compressWriter.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if c.contentEncodingEnabled {
|
||||
doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter)
|
||||
if doCompress {
|
||||
var err error
|
||||
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
|
||||
if err != nil {
|
||||
log.Print("unable to install compressor: ", err)
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
handler.ServeHTTP(writer, httpRequest)
|
||||
}))
|
||||
}
|
||||
|
||||
// HandleWithFilter registers the handler for the given pattern.
|
||||
// Container's filter chain is applied for handler.
|
||||
// If a handler already exists for pattern, HandleWithFilter panics.
|
||||
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
|
||||
f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
|
||||
if len(c.containerFilters) == 0 {
|
||||
handler.ServeHTTP(httpResponse, httpRequest)
|
||||
return
|
||||
}
|
||||
|
||||
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
|
||||
handler.ServeHTTP(resp, req.Request)
|
||||
}}
|
||||
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
|
||||
}
|
||||
|
||||
c.Handle(pattern, http.HandlerFunc(f))
|
||||
}
|
||||
|
||||
// Filter appends a container FilterFunction. These are called before dispatching
|
||||
// a http.Request to a WebService from the container
|
||||
func (c *Container) Filter(filter FilterFunction) {
|
||||
c.containerFilters = append(c.containerFilters, filter)
|
||||
}
|
||||
|
||||
// RegisteredWebServices returns the collections of added WebServices
|
||||
func (c *Container) RegisteredWebServices() []*WebService {
|
||||
c.webServicesLock.RLock()
|
||||
defer c.webServicesLock.RUnlock()
|
||||
result := make([]*WebService, len(c.webServices))
|
||||
for ix := range c.webServices {
|
||||
result[ix] = c.webServices[ix]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
|
||||
func (c *Container) computeAllowedMethods(req *Request) []string {
|
||||
// Go through all RegisteredWebServices() and all its Routes to collect the options
|
||||
methods := []string{}
|
||||
requestPath := req.Request.URL.Path
|
||||
for _, ws := range c.RegisteredWebServices() {
|
||||
matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
|
||||
if matches != nil {
|
||||
finalMatch := matches[len(matches)-1]
|
||||
for _, rt := range ws.Routes() {
|
||||
matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
|
||||
if matches != nil {
|
||||
lastMatch := matches[len(matches)-1]
|
||||
if lastMatch == "" || lastMatch == "/" { // include only if the value is empty or "/"
|
||||
methods = append(methods, rt.Method)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// methods = append(methods, "OPTIONS") not sure about this
|
||||
return methods
|
||||
}
|
||||
|
||||
// newBasicRequestResponse creates a pair of Request,Response from its http versions.
|
||||
// It is basic because no parameter or (produces) content-type information is given.
|
||||
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
|
||||
resp := NewResponse(httpWriter)
|
||||
resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
|
||||
return NewRequest(httpRequest), resp
|
||||
}
|
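As a usage note (not part of the diff): a dedicated Container carries its own ServeMux and implements http.Handler via ServeHTTP, so it can be served independently of restful.DefaultContainer. A minimal sketch; the /healthz route and the :9090 port are made up for illustration.

```go
package main

import (
	"io"
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/healthz")
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		// Response embeds http.ResponseWriter, so it can be written to directly.
		io.WriteString(resp, "ok")
	}))

	// A dedicated Container has its own ServeMux, separate from DefaultContainer.
	admin := restful.NewContainer()
	admin.Add(ws)

	// Container satisfies http.Handler, so it can be passed to ListenAndServe.
	log.Fatal(http.ListenAndServe(":9090", admin))
}
```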
client/vendor/github.com/emicklei/go-restful/v3/cors_filter.go (generated, vendored, new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
|
||||
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
|
||||
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
|
||||
//
|
||||
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
|
||||
// http://enable-cors.org/server.html
|
||||
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
|
||||
type CrossOriginResourceSharing struct {
|
||||
ExposeHeaders []string // list of Header names
|
||||
|
||||
// AllowedHeaders is a list of Header names. Checking is case-insensitive.
|
||||
// The list may contain the special wildcard string ".*" ; all is allowed
|
||||
AllowedHeaders []string
|
||||
|
||||
// AllowedDomains is a list of allowed values for Http Origin.
|
||||
// The list may contain the special wildcard string ".*" ; all is allowed
|
||||
// If empty all are allowed.
|
||||
AllowedDomains []string
|
||||
|
||||
// AllowedDomainFunc is optional and is a function that will do the check
|
||||
// when the origin is not part of the AllowedDomains and it does not contain the wildcard ".*".
|
||||
AllowedDomainFunc func(origin string) bool
|
||||
|
||||
// AllowedMethods is either empty or has a list of http methods names. Checking is case-insensitive.
|
||||
AllowedMethods []string
|
||||
MaxAge int // number of seconds before requiring new Options request
|
||||
CookiesAllowed bool
|
||||
Container *Container
|
||||
|
||||
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
|
||||
}
|
||||
|
||||
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
|
||||
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
|
||||
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
if len(origin) == 0 {
|
||||
if trace {
|
||||
traceLogger.Print("no Http header Origin set")
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if !c.isOriginAllowed(origin) { // check whether this origin is allowed
|
||||
if trace {
|
||||
traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if req.Request.Method != "OPTIONS" {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
|
||||
c.doPreflightRequest(req, resp)
|
||||
} else {
|
||||
c.doActualRequest(req, resp)
|
||||
chain.ProcessFilter(req, resp)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
|
||||
c.setOptionsHeaders(req, resp)
|
||||
// continue processing the response
|
||||
}
|
||||
|
||||
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
|
||||
if len(c.AllowedMethods) == 0 {
|
||||
if c.Container == nil {
|
||||
c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
|
||||
} else {
|
||||
c.AllowedMethods = c.Container.computeAllowedMethods(req)
|
||||
}
|
||||
}
|
||||
|
||||
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
|
||||
if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
|
||||
if trace {
|
||||
traceLogger.Printf("Http header %s:%s is not in %v",
|
||||
HEADER_AccessControlRequestMethod,
|
||||
acrm,
|
||||
c.AllowedMethods)
|
||||
}
|
||||
return
|
||||
}
|
||||
acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
|
||||
if len(acrhs) > 0 {
|
||||
for _, each := range strings.Split(acrhs, ",") {
|
||||
if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
|
||||
if trace {
|
||||
traceLogger.Printf("Http header %s:%s is not in %v",
|
||||
HEADER_AccessControlRequestHeaders,
|
||||
acrhs,
|
||||
c.AllowedHeaders)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
|
||||
resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
|
||||
c.setOptionsHeaders(req, resp)
|
||||
|
||||
// return http 200 response, no body
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
|
||||
c.checkAndSetExposeHeaders(resp)
|
||||
c.setAllowOriginHeader(req, resp)
|
||||
c.checkAndSetAllowCredentials(resp)
|
||||
if c.MaxAge > 0 {
|
||||
resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
|
||||
if len(origin) == 0 {
|
||||
return false
|
||||
}
|
||||
lowerOrigin := strings.ToLower(origin)
|
||||
if len(c.AllowedDomains) == 0 {
|
||||
if c.AllowedDomainFunc != nil {
|
||||
return c.AllowedDomainFunc(lowerOrigin)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// exact match on each allowed domain
|
||||
for _, domain := range c.AllowedDomains {
|
||||
if domain == ".*" || strings.ToLower(domain) == lowerOrigin {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if c.AllowedDomainFunc != nil {
|
||||
return c.AllowedDomainFunc(origin)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
|
||||
origin := req.Request.Header.Get(HEADER_Origin)
|
||||
if c.isOriginAllowed(origin) {
|
||||
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
|
||||
if len(c.ExposeHeaders) > 0 {
|
||||
resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
|
||||
if c.CookiesAllowed {
|
||||
resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
|
||||
}
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
|
||||
for _, each := range allowedMethods {
|
||||
if each == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
|
||||
for _, each := range c.AllowedHeaders {
|
||||
if strings.ToLower(each) == strings.ToLower(header) {
|
||||
return true
|
||||
}
|
||||
if each == "*" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
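A sketch of how this filter is typically attached (illustrative, not part of the diff). The domains, headers and methods are placeholder values; AllowedDomains entries are matched exactly against the Origin header as shown in isOriginAllowed above, and the OPTIONSFilter registration assumes the container-level OPTIONS filter the package provides for preflight requests.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	cors := restful.CrossOriginResourceSharing{
		AllowedHeaders: []string{"Content-Type", "Accept"},
		AllowedDomains: []string{"https://example.com"}, // exact Origin match
		AllowedMethods: []string{"GET", "POST"},
		CookiesAllowed: false,
		Container:      restful.DefaultContainer,
	}
	restful.DefaultContainer.Filter(cors.Filter)

	// Answer preflight OPTIONS requests for registered routes.
	restful.DefaultContainer.Filter(restful.DefaultContainer.OPTIONSFilter)

	log.Fatal(http.ListenAndServe(":8080", restful.DefaultContainer))
}
```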
client/vendor/github.com/emicklei/go-restful/v3/coverage.sh (generated, vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
go test -coverprofile=coverage.out
go tool cover -html=coverage.out
client/vendor/github.com/emicklei/go-restful/v3/curly.go (generated, vendored, new file, 173 lines)
@@ -0,0 +1,173 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
|
||||
type CurlyRouter struct{}
|
||||
|
||||
// SelectRoute is part of the Router interface and returns the best match
|
||||
// for the WebService and its Route for the given Request.
|
||||
func (c CurlyRouter) SelectRoute(
|
||||
webServices []*WebService,
|
||||
httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
|
||||
|
||||
requestTokens := tokenizePath(httpRequest.URL.Path)
|
||||
|
||||
detectedService := c.detectWebService(requestTokens, webServices)
|
||||
if detectedService == nil {
|
||||
if trace {
|
||||
traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
|
||||
}
|
||||
return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
candidateRoutes := c.selectRoutes(detectedService, requestTokens)
|
||||
if len(candidateRoutes) == 0 {
|
||||
if trace {
|
||||
traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
|
||||
}
|
||||
return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
|
||||
}
|
||||
selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
|
||||
if selectedRoute == nil {
|
||||
return detectedService, nil, err
|
||||
}
|
||||
return detectedService, selectedRoute, nil
|
||||
}
|
||||
|
||||
// selectRoutes returns a collection of Route from a WebService that matches the path tokens from the request.
|
||||
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
|
||||
candidates := make(sortableCurlyRoutes, 0, 8)
|
||||
for _, each := range ws.routes {
|
||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb)
|
||||
if matches {
|
||||
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
||||
}
|
||||
}
|
||||
sort.Sort(candidates)
|
||||
return candidates
|
||||
}
|
||||
|
||||
// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and the number of static path elements.
|
||||
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string, routeHasCustomVerb bool) (matches bool, paramCount int, staticCount int) {
|
||||
if len(routeTokens) < len(requestTokens) {
|
||||
// proceed in matching only if last routeToken is wildcard
|
||||
count := len(routeTokens)
|
||||
if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
|
||||
return false, 0, 0
|
||||
}
|
||||
// proceed
|
||||
}
|
||||
for i, routeToken := range routeTokens {
|
||||
if i == len(requestTokens) {
|
||||
// reached end of request path
|
||||
return false, 0, 0
|
||||
}
|
||||
requestToken := requestTokens[i]
|
||||
if routeHasCustomVerb && hasCustomVerb(routeToken){
|
||||
if !isMatchCustomVerb(routeToken, requestToken) {
|
||||
return false, 0, 0
|
||||
}
|
||||
staticCount++
|
||||
requestToken = removeCustomVerb(requestToken)
|
||||
routeToken = removeCustomVerb(routeToken)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(routeToken, "{") {
|
||||
paramCount++
|
||||
if colon := strings.Index(routeToken, ":"); colon != -1 {
|
||||
// match by regex
|
||||
matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
|
||||
if !matchesToken {
|
||||
return false, 0, 0
|
||||
}
|
||||
if matchesRemainder {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else { // no { prefix
|
||||
if requestToken != routeToken {
|
||||
return false, 0, 0
|
||||
}
|
||||
staticCount++
|
||||
}
|
||||
}
|
||||
return true, paramCount, staticCount
|
||||
}
|
||||
|
||||
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
|
||||
// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
|
||||
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
|
||||
regPart := routeToken[colon+1 : len(routeToken)-1]
|
||||
if regPart == "*" {
|
||||
if trace {
|
||||
traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
|
||||
}
|
||||
return true, true
|
||||
}
|
||||
matched, err := regexp.MatchString(regPart, requestToken)
|
||||
return (matched && err == nil), false
|
||||
}
|
||||
|
||||
var jsr311Router = RouterJSR311{}
|
||||
|
||||
// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type
|
||||
// headers of the Request. See also RouterJSR311 in jsr311.go
|
||||
func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
|
||||
// tracing is done inside detectRoute
|
||||
return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
|
||||
}
|
||||
|
||||
// detectWebService returns the best matching webService given the list of path tokens.
|
||||
// see also computeWebserviceScore
|
||||
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
|
||||
var best *WebService
|
||||
score := -1
|
||||
for _, each := range webServices {
|
||||
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
|
||||
if matches && (eachScore > score) {
|
||||
best = each
|
||||
score = eachScore
|
||||
}
|
||||
}
|
||||
return best
|
||||
}
|
||||
|
||||
// computeWebserviceScore returns whether tokens match and
|
||||
// the weighted score of the longest matching consecutive tokens from the beginning.
|
||||
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
|
||||
if len(tokens) > len(requestTokens) {
|
||||
return false, 0
|
||||
}
|
||||
score := 0
|
||||
for i := 0; i < len(tokens); i++ {
|
||||
each := requestTokens[i]
|
||||
other := tokens[i]
|
||||
if len(each) == 0 && len(other) == 0 {
|
||||
score++
|
||||
continue
|
||||
}
|
||||
if len(other) > 0 && strings.HasPrefix(other, "{") {
|
||||
// no empty match
|
||||
if len(each) == 0 {
|
||||
return false, score
|
||||
}
|
||||
score += 1
|
||||
} else {
|
||||
// not a parameter
|
||||
if each != other {
|
||||
return false, score
|
||||
}
|
||||
score += (len(tokens) - i) * 10 //fuzzy
|
||||
}
|
||||
}
|
||||
return true, score
|
||||
}
|
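A minimal sketch of how a regex-constrained path reaches this router (illustrative, not part of this commit; it reuses the {name:[A-Z][A-Z]} pattern and the WebService/Route calls shown in the package documentation in doc.go below):

// CurlyRouter matches the {name:[A-Z][A-Z]} token against the request path segment.
ws := new(restful.WebService)
ws.Path("/persons")
ws.Route(ws.GET("/{name:[A-Z][A-Z]}").To(func(req *restful.Request, resp *restful.Response) {
    // PathParameter returns the two capital letters captured for {name}.
    resp.Write([]byte(req.PathParameter("name")))
}))
restful.Add(ws) // register with the default container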
54
client/vendor/github.com/emicklei/go-restful/v3/curly_route.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

// curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
type curlyRoute struct {
    route       Route
    paramCount  int
    staticCount int
}

// sortableCurlyRoutes orders by most parameters and path elements first.
type sortableCurlyRoutes []curlyRoute

func (s *sortableCurlyRoutes) add(route curlyRoute) {
    *s = append(*s, route)
}

func (s sortableCurlyRoutes) routes() (routes []Route) {
    routes = make([]Route, 0, len(s))
    for _, each := range s {
        routes = append(routes, each.route) // TODO change return type
    }
    return routes
}

func (s sortableCurlyRoutes) Len() int {
    return len(s)
}
func (s sortableCurlyRoutes) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}
func (s sortableCurlyRoutes) Less(i, j int) bool {
    a := s[j]
    b := s[i]

    // primary key
    if a.staticCount < b.staticCount {
        return true
    }
    if a.staticCount > b.staticCount {
        return false
    }
    // secundary key
    if a.paramCount < b.paramCount {
        return true
    }
    if a.paramCount > b.paramCount {
        return false
    }
    return a.route.Path < b.route.Path
}
29
client/vendor/github.com/emicklei/go-restful/v3/custom_verb.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
package restful

import (
    "fmt"
    "regexp"
)

var (
    customVerbReg = regexp.MustCompile(":([A-Za-z]+)$")
)

func hasCustomVerb(routeToken string) bool {
    return customVerbReg.MatchString(routeToken)
}

func isMatchCustomVerb(routeToken string, pathToken string) bool {
    rs := customVerbReg.FindStringSubmatch(routeToken)
    if len(rs) < 2 {
        return false
    }

    customVerb := rs[1]
    specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb))
    return specificVerbReg.MatchString(pathToken)
}

func removeCustomVerb(str string) string {
    return customVerbReg.ReplaceAllString(str, "")
}
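A hedged sketch of the custom-verb syntax these helpers recognize (illustrative only; the route path and handler name are hypothetical, only the ":verb" suffix handling comes from this file):

// A trailing ":resend" segment is a custom verb: POST /invitations/42:resend
// matches this route, while POST /invitations/42 does not.
ws.Route(ws.POST("/invitations/{invitation-id}:resend").To(resendInvitation))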
185
client/vendor/github.com/emicklei/go-restful/v3/doc.go
generated
vendored
Normal file
@@ -0,0 +1,185 @@
/*
Package restful , a lean package for creating REST-style WebServices without magic.

WebServices and Routes

A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls.
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
WebServices must be added to a container (see below) in order to handler Http requests from a server.

A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
This package has the logic to find the best matching Route and if found, call its Function.

    ws := new(restful.WebService)
    ws.
        Path("/users").
        Consumes(restful.MIME_JSON, restful.MIME_XML).
        Produces(restful.MIME_JSON, restful.MIME_XML)

    ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource

    ...

    // GET http://localhost:8080/users/1
    func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
        id := request.PathParameter("user-id")
        ...
    }

The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.

See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation.

Regular expression matching Routes

A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
This feature requires the use of a CurlyRouter.

Containers

A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
The Default container of go-restful uses the http.DefaultServeMux.
You can create your own Container and create a new http.Server for that particular container.

    container := restful.NewContainer()
    server := &http.Server{Addr: ":8081", Handler: container}

Filters

A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
In the restful package there are three hooks into the request,response flow where filters can be added.
Each filter must define a FilterFunction:

    func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)

Use the following statement to pass the request,response pair to the next filter or RouteFunction

    chain.ProcessFilter(req, resp)

Container Filters

These are processed before any registered WebService.

    // install a (global) filter for the default container (processed before any webservice)
    restful.Filter(globalLogging)

WebService Filters

These are processed before any Route of a WebService.

    // install a webservice filter (processed before any route)
    ws.Filter(webserviceLogging).Filter(measureTime)

Route Filters

These are processed before calling the function associated with the Route.

    // install 2 chained route filters (processed before calling findUser)
    ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))

See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations.

Response Encoding

Two encodings are supported: gzip and deflate. To enable this for all responses:

    restful.DefaultContainer.EnableContentEncoding(true)

If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.

See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go

OPTIONS support

By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.

    Filter(OPTIONSFilter())

CORS

By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.

    cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
    Filter(cors.Filter)

Error Handling

Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.

    400: Bad Request

If path or query parameters are not valid (content or type) then use http.StatusBadRequest.

    404: Not Found

Despite a valid URI, the resource requested may not be available

    500: Internal Server Error

If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.

    405: Method Not Allowed

The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.

    406: Not Acceptable

The request does not have or has an unknown Accept Header set for this operation.

    415: Unsupported Media Type

The request does not have or has an unknown Content-Type Header set for this operation.

ServiceError

In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.

Performance options

This package has several options that affect the performance of your service. It is important to understand them and how you can change it.

    restful.DefaultContainer.DoNotRecover(false)

DoNotRecover controls whether panics will be caught to return HTTP 500.
If set to false, the container will recover from panics.
Default value is true

    restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))

If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.

Trouble shooting

This package has the means to produce detail logging of the complete Http request matching process and filter invocation.
Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as:

    restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))

Logging

The restful.SetLogger() method allows you to override the logger used by the package. By default restful
uses the standard library `log` package and logs to stdout. Different logging packages are supported as
long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your
preferred package is simple.

Resources

[project]: https://github.com/emicklei/go-restful

[examples]: https://github.com/emicklei/go-restful/blob/master/examples

[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/

[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape

(c) 2012-2015, http://ernestmicklei.com. MIT License
*/
package restful
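The package documentation above describes WebServices, Routes and the default container; a self-contained sketch that wires them together (illustrative, not part of this commit):

package main

import (
    "log"
    "net/http"

    restful "github.com/emicklei/go-restful/v3"
)

func hello(req *restful.Request, resp *restful.Response) {
    // Echo the path parameter captured by the {name} token.
    resp.Write([]byte("hello, " + req.PathParameter("name")))
}

func main() {
    ws := new(restful.WebService)
    ws.Path("/greetings").Produces(restful.MIME_JSON)
    ws.Route(ws.GET("/{name}").To(hello))
    restful.Add(ws) // the default container uses http.DefaultServeMux
    log.Fatal(http.ListenAndServe(":8080", nil))
}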
162
client/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
package restful

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
    "encoding/xml"
    "strings"
    "sync"
)

// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
type EntityReaderWriter interface {
    // Read a serialized version of the value from the request.
    // The Request may have a decompressing reader. Depends on Content-Encoding.
    Read(req *Request, v interface{}) error

    // Write a serialized version of the value on the response.
    // The Response may have a compressing writer. Depends on Accept-Encoding.
    // status should be a valid Http Status code
    Write(resp *Response, status int, v interface{}) error
}

// entityAccessRegistry is a singleton
var entityAccessRegistry = &entityReaderWriters{
    protection: new(sync.RWMutex),
    accessors:  map[string]EntityReaderWriter{},
}

// entityReaderWriters associates MIME to an EntityReaderWriter
type entityReaderWriters struct {
    protection *sync.RWMutex
    accessors  map[string]EntityReaderWriter
}

func init() {
    RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
    RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
}

// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type.
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
    entityAccessRegistry.protection.Lock()
    defer entityAccessRegistry.protection.Unlock()
    entityAccessRegistry.accessors[mime] = erw
}

// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
// This package is already initialized with such an accessor using the MIME_JSON contentType.
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
    return entityJSONAccess{ContentType: contentType}
}

// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
// This package is already initialized with such an accessor using the MIME_XML contentType.
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
    return entityXMLAccess{ContentType: contentType}
}

// accessorAt returns the registered ReaderWriter for this MIME type.
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
    r.protection.RLock()
    defer r.protection.RUnlock()
    er, ok := r.accessors[mime]
    if !ok {
        // retry with reverse lookup
        // more expensive but we are in an exceptional situation anyway
        for k, v := range r.accessors {
            if strings.Contains(mime, k) {
                return v, true
            }
        }
    }
    return er, ok
}

// entityXMLAccess is a EntityReaderWriter for XML encoding
type entityXMLAccess struct {
    // This is used for setting the Content-Type header when writing
    ContentType string
}

// Read unmarshalls the value from XML
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
    return xml.NewDecoder(req.Request.Body).Decode(v)
}

// Write marshalls the value to JSON and set the Content-Type Header.
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
    return writeXML(resp, status, e.ContentType, v)
}

// writeXML marshalls the value to JSON and set the Content-Type Header.
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
    if v == nil {
        resp.WriteHeader(status)
        // do not write a nil representation
        return nil
    }
    if resp.prettyPrint {
        // pretty output must be created and written explicitly
        output, err := xml.MarshalIndent(v, " ", " ")
        if err != nil {
            return err
        }
        resp.Header().Set(HEADER_ContentType, contentType)
        resp.WriteHeader(status)
        _, err = resp.Write([]byte(xml.Header))
        if err != nil {
            return err
        }
        _, err = resp.Write(output)
        return err
    }
    // not-so-pretty
    resp.Header().Set(HEADER_ContentType, contentType)
    resp.WriteHeader(status)
    return xml.NewEncoder(resp).Encode(v)
}

// entityJSONAccess is a EntityReaderWriter for JSON encoding
type entityJSONAccess struct {
    // This is used for setting the Content-Type header when writing
    ContentType string
}

// Read unmarshalls the value from JSON
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
    decoder := NewDecoder(req.Request.Body)
    decoder.UseNumber()
    return decoder.Decode(v)
}

// Write marshalls the value to JSON and set the Content-Type Header.
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
    return writeJSON(resp, status, e.ContentType, v)
}

// write marshalls the value to JSON and set the Content-Type Header.
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
    if v == nil {
        resp.WriteHeader(status)
        // do not write a nil representation
        return nil
    }
    if resp.prettyPrint {
        // pretty output must be created and written explicitly
        output, err := MarshalIndent(v, "", " ")
        if err != nil {
            return err
        }
        resp.Header().Set(HEADER_ContentType, contentType)
        resp.WriteHeader(status)
        _, err = resp.Write(output)
        return err
    }
    // not-so-pretty
    resp.Header().Set(HEADER_ContentType, contentType)
    resp.WriteHeader(status)
    return NewEncoder(resp).Encode(v)
}
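A short sketch of registering an extra accessor (illustrative; the MIME string is an example, the calls are the exported RegisterEntityAccessor and NewEntityAccessorJSON defined above):

// Reuse the JSON reader/writer for a vendor-specific media type so routes
// can Consume/Produce it without additional code.
const mimeVndJSON = "application/vnd.example+json"
restful.RegisterEntityAccessor(mimeVndJSON, restful.NewEntityAccessorJSON(mimeVndJSON))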
21
client/vendor/github.com/emicklei/go-restful/v3/extensions.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package restful

// Copyright 2021 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

// ExtensionProperties provides storage of vendor extensions for entities
type ExtensionProperties struct {
    // Extensions vendor extensions used to describe extra functionality
    // (https://swagger.io/docs/specification/2-0/swagger-extensions/)
    Extensions map[string]interface{}
}

// AddExtension adds or updates a key=value pair to the extension map.
func (ep *ExtensionProperties) AddExtension(key string, value interface{}) {
    if ep.Extensions == nil {
        ep.Extensions = map[string]interface{}{key: value}
    } else {
        ep.Extensions[key] = value
    }
}
37
client/vendor/github.com/emicklei/go-restful/v3/filter.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
type FilterChain struct {
    Filters       []FilterFunction // ordered list of FilterFunction
    Index         int              // index into filters that is currently in progress
    Target        RouteFunction    // function to call after passing all filters
    ParameterDocs []*Parameter     // the parameter docs for the route
    Operation     string           // the name of the operation
}

// ProcessFilter passes the request,response pair through the next of Filters.
// Each filter can decide to proceed to the next Filter or handle the Response itself.
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
    if f.Index < len(f.Filters) {
        f.Index++
        f.Filters[f.Index-1](request, response, f)
    } else {
        f.Target(request, response)
    }
}

// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
type FilterFunction func(*Request, *Response, *FilterChain)

// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
// See examples/restful-no-cache-filter.go for usage
func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
    resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
    resp.Header().Set("Pragma", "no-cache")                                   // HTTP 1.0.
    resp.Header().Set("Expires", "0")                                         // Proxies.
    chain.ProcessFilter(req, resp)
}
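A sketch of a FilterFunction built on the chain above (illustrative, not part of this commit):

// requestLogging logs the method and path, then passes control on;
// forgetting chain.ProcessFilter would stop the request here.
func requestLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
    log.Printf("%s %s", req.Request.Method, req.Request.URL.Path)
    chain.ProcessFilter(req, resp)
}

// restful.Filter(requestLogging) installs it on the default container.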
21
client/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package restful

import (
    "net/http"
)

// HttpMiddlewareHandler is a function that takes a http.Handler and returns a http.Handler
type HttpMiddlewareHandler func(http.Handler) http.Handler

// HttpMiddlewareHandlerToFilter converts a HttpMiddlewareHandler to a FilterFunction.
func HttpMiddlewareHandlerToFilter(middleware HttpMiddlewareHandler) FilterFunction {
    return func(req *Request, resp *Response, chain *FilterChain) {
        next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
            req.Request = r
            resp.ResponseWriter = rw
            chain.ProcessFilter(req, resp)
        })

        middleware(next).ServeHTTP(resp.ResponseWriter, req.Request)
    }
}
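A sketch of adapting a plain net/http middleware with the helper above (illustrative; the middleware itself is hypothetical):

// withServerHeader is ordinary net/http middleware.
func withServerHeader(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Server", "example")
        next.ServeHTTP(w, r)
    })
}

// ws.Filter(restful.HttpMiddlewareHandlerToFilter(withServerHeader)) installs it as a go-restful filter.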
11
client/vendor/github.com/emicklei/go-restful/v3/json.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
// +build !jsoniter

package restful

import "encoding/json"

var (
    MarshalIndent = json.MarshalIndent
    NewDecoder    = json.NewDecoder
    NewEncoder    = json.NewEncoder
)
12
client/vendor/github.com/emicklei/go-restful/v3/jsoniter.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
// +build jsoniter

package restful

import "github.com/json-iterator/go"

var (
    json          = jsoniter.ConfigCompatibleWithStandardLibrary
    MarshalIndent = json.MarshalIndent
    NewDecoder    = json.NewDecoder
    NewEncoder    = json.NewEncoder
)
326
client/vendor/github.com/emicklei/go-restful/v3/jsr311.go
generated
vendored
Normal file
@@ -0,0 +1,326 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
    "errors"
    "fmt"
    "net/http"
    "sort"
    "strings"
)

// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
// RouterJSR311 implements the Router interface.
// Concept of locators is not implemented.
type RouterJSR311 struct{}

// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (r RouterJSR311) SelectRoute(
    webServices []*WebService,
    httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {

    // Identify the root resource class (WebService)
    dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
    if err != nil {
        return nil, nil, NewError(http.StatusNotFound, "")
    }
    // Obtain the set of candidate methods (Routes)
    routes := r.selectRoutes(dispatcher, finalMatch)
    if len(routes) == 0 {
        return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
    }

    // Identify the method (Route) that will handle the request
    route, ok := r.detectRoute(routes, httpRequest)
    return dispatcher, route, ok
}

// ExtractParameters is used to obtain the path parameters from the route using the same matching
// engine as the JSR 311 router.
func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string {
    webServiceExpr := webService.pathExpr
    webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath)
    pathParameters := r.extractParams(webServiceExpr, webServiceMatches)
    routeExpr := route.pathExpr
    routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1])
    routeParams := r.extractParams(routeExpr, routeMatches)
    for key, value := range routeParams {
        pathParameters[key] = value
    }
    return pathParameters
}

func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string {
    params := map[string]string{}
    for i := 1; i < len(matches); i++ {
        if len(pathExpr.VarNames) >= i {
            params[pathExpr.VarNames[i-1]] = matches[i]
        }
    }
    return params
}

// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
    candidates := make([]*Route, 0, 8)
    for i, each := range routes {
        ok := true
        for _, fn := range each.If {
            if !fn(httpRequest) {
                ok = false
                break
            }
        }
        if ok {
            candidates = append(candidates, &routes[i])
        }
    }
    if len(candidates) == 0 {
        if trace {
            traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes))
        }
        return nil, NewError(http.StatusNotFound, "404: Not Found")
    }

    // http method
    previous := candidates
    candidates = candidates[:0]
    for _, each := range previous {
        if httpRequest.Method == each.Method {
            candidates = append(candidates, each)
        }
    }
    if len(candidates) == 0 {
        if trace {
            traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method)
        }
        allowed := []string{}
    allowedLoop:
        for _, candidate := range previous {
            for _, method := range allowed {
                if method == candidate.Method {
                    continue allowedLoop
                }
            }
            allowed = append(allowed, candidate.Method)
        }
        header := http.Header{"Allow": []string{strings.Join(allowed, ", ")}}
        return nil, NewErrorWithHeader(http.StatusMethodNotAllowed, "405: Method Not Allowed", header)
    }

    // content-type
    contentType := httpRequest.Header.Get(HEADER_ContentType)
    previous = candidates
    candidates = candidates[:0]
    for _, each := range previous {
        if each.matchesContentType(contentType) {
            candidates = append(candidates, each)
        }
    }
    if len(candidates) == 0 {
        if trace {
            traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
        }
        if httpRequest.ContentLength > 0 {
            return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
        }
    }

    // accept
    previous = candidates
    candidates = candidates[:0]
    accept := httpRequest.Header.Get(HEADER_Accept)
    if len(accept) == 0 {
        accept = "*/*"
    }
    for _, each := range previous {
        if each.matchesAccept(accept) {
            candidates = append(candidates, each)
        }
    }
    if len(candidates) == 0 {
        if trace {
            traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept)
        }
        available := []string{}
        for _, candidate := range previous {
            available = append(available, candidate.Produces...)
        }
        // if POST,PUT,PATCH without body
        method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
        if (method == http.MethodPost ||
            method == http.MethodPut ||
            method == http.MethodPatch) && length == "" {
            return nil, NewError(
                http.StatusUnsupportedMediaType,
                fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
            )
        }
        return nil, NewError(
            http.StatusNotAcceptable,
            fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
        )
    }
    // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
    return candidates[0], nil
}

// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
// n/m > n/* > */*
func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
    // TODO
    return &routes[0]
}

// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
    filtered := &sortableRouteCandidates{}
    for _, each := range dispatcher.Routes() {
        pathExpr := each.pathExpr
        matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
        if matches != nil {
            lastMatch := matches[len(matches)-1]
            if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’.
                filtered.candidates = append(filtered.candidates,
                    routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
            }
        }
    }
    if len(filtered.candidates) == 0 {
        if trace {
            traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
        }
        return []Route{}
    }
    sort.Sort(sort.Reverse(filtered))

    // select other routes from candidates whoes expression matches rmatch
    matchingRoutes := []Route{filtered.candidates[0].route}
    for c := 1; c < len(filtered.candidates); c++ {
        each := filtered.candidates[c]
        if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
            matchingRoutes = append(matchingRoutes, each.route)
        }
    }
    return matchingRoutes
}

// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
    filtered := &sortableDispatcherCandidates{}
    for _, each := range dispatchers {
        matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
        if matches != nil {
            filtered.candidates = append(filtered.candidates,
                dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
        }
    }
    if len(filtered.candidates) == 0 {
        if trace {
            traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
        }
        return nil, "", errors.New("not found")
    }
    sort.Sort(sort.Reverse(filtered))
    return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
}

// Types and functions to support the sorting of Routes

type routeCandidate struct {
    route           Route
    matchesCount    int // the number of capturing groups
    literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
    nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
}

func (r routeCandidate) expressionToMatch() string {
    return r.route.pathExpr.Source
}

func (r routeCandidate) String() string {
    return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
}

type sortableRouteCandidates struct {
    candidates []routeCandidate
}

func (rcs *sortableRouteCandidates) Len() int {
    return len(rcs.candidates)
}
func (rcs *sortableRouteCandidates) Swap(i, j int) {
    rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
}
func (rcs *sortableRouteCandidates) Less(i, j int) bool {
    ci := rcs.candidates[i]
    cj := rcs.candidates[j]
    // primary key
    if ci.literalCount < cj.literalCount {
        return true
    }
    if ci.literalCount > cj.literalCount {
        return false
    }
    // secundary key
    if ci.matchesCount < cj.matchesCount {
        return true
    }
    if ci.matchesCount > cj.matchesCount {
        return false
    }
    // tertiary key
    if ci.nonDefaultCount < cj.nonDefaultCount {
        return true
    }
    if ci.nonDefaultCount > cj.nonDefaultCount {
        return false
    }
    // quaternary key ("source" is interpreted as Path)
    return ci.route.Path < cj.route.Path
}

// Types and functions to support the sorting of Dispatchers

type dispatcherCandidate struct {
    dispatcher      *WebService
    finalMatch      string
    matchesCount    int // the number of capturing groups
    literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
    nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’)
}
type sortableDispatcherCandidates struct {
    candidates []dispatcherCandidate
}

func (dc *sortableDispatcherCandidates) Len() int {
    return len(dc.candidates)
}
func (dc *sortableDispatcherCandidates) Swap(i, j int) {
    dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
}
func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
    ci := dc.candidates[i]
    cj := dc.candidates[j]
    // primary key
    if ci.matchesCount < cj.matchesCount {
        return true
    }
    if ci.matchesCount > cj.matchesCount {
        return false
    }
    // secundary key
    if ci.literalCount < cj.literalCount {
        return true
    }
    if ci.literalCount > cj.literalCount {
        return false
    }
    // tertiary key
    return ci.nonDefaultCount < cj.nonDefaultCount
}
34
client/vendor/github.com/emicklei/go-restful/v3/log/log.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
package log

import (
    stdlog "log"
    "os"
)

// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface {
    Print(v ...interface{})
    Printf(format string, v ...interface{})
}

var Logger StdLogger

func init() {
    // default Logger
    SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}

// SetLogger sets the logger for this package
func SetLogger(customLogger StdLogger) {
    Logger = customLogger
}

// Print delegates to the Logger
func Print(v ...interface{}) {
    Logger.Print(v...)
}

// Printf delegates to the Logger
func Printf(format string, v ...interface{}) {
    Logger.Printf(format, v...)
}
32
client/vendor/github.com/emicklei/go-restful/v3/logger.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
package restful

// Copyright 2014 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
    "github.com/emicklei/go-restful/v3/log"
)

var trace bool = false
var traceLogger log.StdLogger

func init() {
    traceLogger = log.Logger // use the package logger by default
}

// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set.
// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
func TraceLogger(logger log.StdLogger) {
    traceLogger = logger
    EnableTracing(logger != nil)
}

// SetLogger exposes the setter for the global logger on the top-level package
func SetLogger(customLogger log.StdLogger) {
    log.SetLogger(customLogger)
}

// EnableTracing can be used to Trace logging on and off.
func EnableTracing(enabled bool) {
    trace = enabled
}
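A sketch of pointing both the package logger and the trace logger at a custom destination (illustrative; it only uses the setters defined above and the TraceLogger example from doc.go):

logger := log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)
restful.SetLogger(logger)   // replaces the default stderr logger
restful.TraceLogger(logger) // also enables detailed request-matching traces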
50
client/vendor/github.com/emicklei/go-restful/v3/mime.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
package restful

import (
    "strconv"
    "strings"
)

type mime struct {
    media   string
    quality float64
}

// insertMime adds a mime to a list and keeps it sorted by quality.
func insertMime(l []mime, e mime) []mime {
    for i, each := range l {
        // if current mime has lower quality then insert before
        if e.quality > each.quality {
            left := append([]mime{}, l[0:i]...)
            return append(append(left, e), l[i:]...)
        }
    }
    return append(l, e)
}

const qFactorWeightingKey = "q"

// sortedMimes returns a list of mime sorted (desc) by its specified quality.
// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
func sortedMimes(accept string) (sorted []mime) {
    for _, each := range strings.Split(accept, ",") {
        typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
        if len(typeAndQuality) == 1 {
            sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
        } else {
            // take factor
            qAndWeight := strings.Split(typeAndQuality[1], "=")
            if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey {
                f, err := strconv.ParseFloat(qAndWeight[1], 64)
                if err != nil {
                    traceLogger.Printf("unable to parse quality in %s, %v", each, err)
                } else {
                    sorted = insertMime(sorted, mime{typeAndQuality[0], f})
                }
            } else {
                sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
            }
        }
    }
    return
}
34
client/vendor/github.com/emicklei/go-restful/v3/options_filter.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
package restful

import "strings"

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// As for any filter, you can also install it for a particular WebService within a Container.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
    if "OPTIONS" != req.Request.Method {
        chain.ProcessFilter(req, resp)
        return
    }

    archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
    methods := strings.Join(c.computeAllowedMethods(req), ",")
    origin := req.Request.Header.Get(HEADER_Origin)

    resp.AddHeader(HEADER_Allow, methods)
    resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
    resp.AddHeader(HEADER_AccessControlAllowHeaders, archs)
    resp.AddHeader(HEADER_AccessControlAllowMethods, methods)
}

// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func OPTIONSFilter() FilterFunction {
    return DefaultContainer.OPTIONSFilter
}
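Installing the filter on the default container, as the package documentation shows (illustrative):

restful.Filter(restful.OPTIONSFilter())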
242
client/vendor/github.com/emicklei/go-restful/v3/parameter.go
generated
vendored
Normal file
@@ -0,0 +1,242 @@
package restful

import "sort"

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

const (
    // PathParameterKind = indicator of Request parameter type "path"
    PathParameterKind = iota

    // QueryParameterKind = indicator of Request parameter type "query"
    QueryParameterKind

    // BodyParameterKind = indicator of Request parameter type "body"
    BodyParameterKind

    // HeaderParameterKind = indicator of Request parameter type "header"
    HeaderParameterKind

    // FormParameterKind = indicator of Request parameter type "form"
    FormParameterKind

    // MultiPartFormParameterKind = indicator of Request parameter type "multipart/form-data"
    MultiPartFormParameterKind

    // CollectionFormatCSV comma separated values `foo,bar`
    CollectionFormatCSV = CollectionFormat("csv")

    // CollectionFormatSSV space separated values `foo bar`
    CollectionFormatSSV = CollectionFormat("ssv")

    // CollectionFormatTSV tab separated values `foo\tbar`
    CollectionFormatTSV = CollectionFormat("tsv")

    // CollectionFormatPipes pipe separated values `foo|bar`
    CollectionFormatPipes = CollectionFormat("pipes")

    // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single
    // instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters
    CollectionFormatMulti = CollectionFormat("multi")
)

type CollectionFormat string

func (cf CollectionFormat) String() string {
    return string(cf)
}

// Parameter is for documententing the parameter used in a Http Request
// ParameterData kinds are Path,Query and Body
type Parameter struct {
    data *ParameterData
}

// ParameterData represents the state of a Parameter.
// It is made public to make it accessible to e.g. the Swagger package.
type ParameterData struct {
    ExtensionProperties
    Name, Description, DataType, DataFormat string
    Kind                                    int
    Required                                bool
    // AllowableValues is deprecated. Use PossibleValues instead
    AllowableValues  map[string]string
    PossibleValues   []string
    AllowMultiple    bool
    AllowEmptyValue  bool
    DefaultValue     string
    CollectionFormat string
    Pattern          string
    Minimum          *float64
    Maximum          *float64
    MinLength        *int64
    MaxLength        *int64
    MinItems         *int64
    MaxItems         *int64
    UniqueItems      bool
}

// Data returns the state of the Parameter
func (p *Parameter) Data() ParameterData {
    return *p.data
}

// Kind returns the parameter type indicator (see const for valid values)
func (p *Parameter) Kind() int {
    return p.data.Kind
}

func (p *Parameter) bePath() *Parameter {
    p.data.Kind = PathParameterKind
    return p
}
func (p *Parameter) beQuery() *Parameter {
    p.data.Kind = QueryParameterKind
    return p
}
func (p *Parameter) beBody() *Parameter {
    p.data.Kind = BodyParameterKind
    return p
}

func (p *Parameter) beHeader() *Parameter {
    p.data.Kind = HeaderParameterKind
    return p
}

func (p *Parameter) beForm() *Parameter {
    p.data.Kind = FormParameterKind
    return p
}

func (p *Parameter) beMultiPartForm() *Parameter {
    p.data.Kind = MultiPartFormParameterKind
    return p
}

// Required sets the required field and returns the receiver
func (p *Parameter) Required(required bool) *Parameter {
    p.data.Required = required
    return p
}

// AllowMultiple sets the allowMultiple field and returns the receiver
func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
    p.data.AllowMultiple = multiple
    return p
}

// AddExtension adds or updates a key=value pair to the extension map
func (p *Parameter) AddExtension(key string, value interface{}) *Parameter {
    p.data.AddExtension(key, value)
    return p
}

// AllowEmptyValue sets the AllowEmptyValue field and returns the receiver
func (p *Parameter) AllowEmptyValue(multiple bool) *Parameter {
    p.data.AllowEmptyValue = multiple
    return p
}

// AllowableValues is deprecated. Use PossibleValues instead. Both will be set.
func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
    p.data.AllowableValues = values

    allowableSortedKeys := make([]string, 0, len(values))
    for k := range values {
        allowableSortedKeys = append(allowableSortedKeys, k)
    }
    sort.Strings(allowableSortedKeys)

    p.data.PossibleValues = make([]string, 0, len(values))
    for _, k := range allowableSortedKeys {
        p.data.PossibleValues = append(p.data.PossibleValues, values[k])
    }
    return p
}

// PossibleValues sets the possible values field and returns the receiver
func (p *Parameter) PossibleValues(values []string) *Parameter {
    p.data.PossibleValues = values
    return p
}

// DataType sets the dataType field and returns the receiver
func (p *Parameter) DataType(typeName string) *Parameter {
    p.data.DataType = typeName
    return p
}

// DataFormat sets the dataFormat field for Swagger UI
func (p *Parameter) DataFormat(formatName string) *Parameter {
    p.data.DataFormat = formatName
    return p
}

// DefaultValue sets the default value field and returns the receiver
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
    p.data.DefaultValue = stringRepresentation
    return p
}

// Description sets the description value field and returns the receiver
func (p *Parameter) Description(doc string) *Parameter {
    p.data.Description = doc
    return p
}

// CollectionFormat sets the collection format for an array type
func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter {
    p.data.CollectionFormat = format.String()
    return p
}

// Pattern sets the pattern field and returns the receiver
func (p *Parameter) Pattern(pattern string) *Parameter {
    p.data.Pattern = pattern
    return p
}

// Minimum sets the minimum field and returns the receiver
func (p *Parameter) Minimum(minimum float64) *Parameter {
    p.data.Minimum = &minimum
    return p
}

// Maximum sets the maximum field and returns the receiver
func (p *Parameter) Maximum(maximum float64) *Parameter {
    p.data.Maximum = &maximum
    return p
}

// MinLength sets the minLength field and returns the receiver
func (p *Parameter) MinLength(minLength int64) *Parameter {
    p.data.MinLength = &minLength
    return p
}

// MaxLength sets the maxLength field and returns the receiver
func (p *Parameter) MaxLength(maxLength int64) *Parameter {
    p.data.MaxLength = &maxLength
    return p
}

// MinItems sets the minItems field and returns the receiver
func (p *Parameter) MinItems(minItems int64) *Parameter {
    p.data.MinItems = &minItems
    return p
}

// MaxItems sets the maxItems field and returns the receiver
func (p *Parameter) MaxItems(maxItems int64) *Parameter {
    p.data.MaxItems = &maxItems
    return p
}

// UniqueItems sets the uniqueItems field and returns the receiver
func (p *Parameter) UniqueItems(uniqueItems bool) *Parameter {
    p.data.UniqueItems = uniqueItems
    return p
}
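A sketch of the builder methods above used to document a query parameter (illustrative; ws.QueryParameter and the route builder's Param method are upstream go-restful helpers that are not part of this excerpt, and listUsers is a hypothetical handler):

ws.Route(ws.GET("/").
    Param(ws.QueryParameter("limit", "maximum number of results").
        DataType("integer").
        DefaultValue("20").
        Minimum(1).
        Maximum(100).
        Required(false)).
    To(listUsers))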
74
client/vendor/github.com/emicklei/go-restful/v3/path_expression.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
    "bytes"
    "fmt"
    "regexp"
    "strings"
)

// PathExpression holds a compiled path expression (RegExp) needed to match against
// Http request paths and to extract path parameter values.
type pathExpression struct {
    LiteralCount int      // the number of literal characters (means those not resulting from template variable substitution)
    VarNames     []string // the names of parameters (enclosed by {}) in the path
    VarCount     int      // the number of named parameters (enclosed by {}) in the path
    Matcher      *regexp.Regexp
    Source       string // Path as defined by the RouteBuilder
    tokens       []string
}

// NewPathExpression creates a PathExpression from the input URL path.
// Returns an error if the path is invalid.
func newPathExpression(path string) (*pathExpression, error) {
    expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path)
    compiled, err := regexp.Compile(expression)
    if err != nil {
        return nil, err
    }
    return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil
}

// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) {
    var buffer bytes.Buffer
    buffer.WriteString("^")
    //tokens = strings.Split(template, "/")
    tokens = tokenizePath(template)
    for _, each := range tokens {
        if each == "" {
            continue
        }
        buffer.WriteString("/")
        if strings.HasPrefix(each, "{") {
            // check for regular expression in variable
            colon := strings.Index(each, ":")
            var varName string
            if colon != -1 {
                // extract expression
                varName = strings.TrimSpace(each[1:colon])
                paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
                if paramExpr == "*" { // special case
                    buffer.WriteString("(.*)")
                } else {
                    buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
                }
            } else {
                // plain var
                varName = strings.TrimSpace(each[1 : len(each)-1])
                buffer.WriteString("([^/]+?)")
            }
            varNames = append(varNames, varName)
            varCount += 1
        } else {
            literalCount += len(each)
            encoded := each // TODO URI encode
            buffer.WriteString(regexp.QuoteMeta(encoded))
        }
    }
    return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens
}
74 client/vendor/github.com/emicklei/go-restful/v3/path_processor.go generated vendored Normal file
@@ -0,0 +1,74 @@
package restful

import (
	"bytes"
	"strings"
)

// Copyright 2018 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path.
// If a Router does not implement this interface then the default behaviour will be used.
type PathProcessor interface {
	// ExtractParameters gets the path parameters defined in the route and webService from the urlPath
	ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string
}

type defaultPathProcessor struct{}

// Extract the parameters from the request url path
func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string {
	urlParts := tokenizePath(urlPath)
	pathParameters := map[string]string{}
	for i, key := range r.pathParts {
		var value string
		if i >= len(urlParts) {
			value = ""
		} else {
			value = urlParts[i]
		}
		if r.hasCustomVerb && hasCustomVerb(key) {
			key = removeCustomVerb(key)
			value = removeCustomVerb(value)
		}

		if strings.Index(key, "{") > -1 { // path-parameter
			if colon := strings.Index(key, ":"); colon != -1 {
				// extract by regex
				regPart := key[colon+1 : len(key)-1]
				keyPart := key[1:colon]
				if regPart == "*" {
					pathParameters[keyPart] = untokenizePath(i, urlParts)
					break
				} else {
					pathParameters[keyPart] = value
				}
			} else {
				// without enclosing {}
				startIndex := strings.Index(key, "{")
				endKeyIndex := strings.Index(key, "}")

				suffixLength := len(key) - endKeyIndex - 1
				endValueIndex := len(value) - suffixLength

				pathParameters[key[startIndex+1:endKeyIndex]] = value[startIndex:endValueIndex]
			}
		}
	}
	return pathParameters
}

// Untokenize back into an URL path using the slash separator
func untokenizePath(offset int, parts []string) string {
	var buffer bytes.Buffer
	for p := offset; p < len(parts); p++ {
		buffer.WriteString(parts[p])
		// do not end
		if p < len(parts)-1 {
			buffer.WriteString("/")
		}
	}
	return buffer.String()
}
132 client/vendor/github.com/emicklei/go-restful/v3/request.go generated vendored Normal file
@@ -0,0 +1,132 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"compress/zlib"
	"net/http"
)

var defaultRequestContentType string

// Request is a wrapper for a http Request that provides convenience methods
type Request struct {
	Request        *http.Request
	pathParameters map[string]string
	attributes     map[string]interface{} // for storing request-scoped values
	selectedRoute  *Route                 // is nil when no route was matched
}

func NewRequest(httpRequest *http.Request) *Request {
	return &Request{
		Request:        httpRequest,
		pathParameters: map[string]string{},
		attributes:     map[string]interface{}{},
	} // empty parameters, attributes
}

// If ContentType is missing or */* is given then fall back to this type, otherwise
// a "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
	defaultRequestContentType = mime
}

// PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string {
	return r.pathParameters[name]
}

// PathParameters accesses the Path parameter values
func (r *Request) PathParameters() map[string]string {
	return r.pathParameters
}

// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
	return r.Request.FormValue(name)
}

// QueryParameters returns the all the query parameters values by name
func (r *Request) QueryParameters(name string) []string {
	return r.Request.URL.Query()[name]
}

// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error.
func (r *Request) BodyParameter(name string) (string, error) {
	err := r.Request.ParseForm()
	if err != nil {
		return "", err
	}
	return r.Request.PostFormValue(name), nil
}

// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
func (r *Request) HeaderParameter(name string) string {
	return r.Request.Header.Get(name)
}

// ReadEntity checks the Accept header and reads the content into the entityPointer.
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
	contentType := r.Request.Header.Get(HEADER_ContentType)
	contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)

	// check if the request body needs decompression
	if ENCODING_GZIP == contentEncoding {
		gzipReader := currentCompressorProvider.AcquireGzipReader()
		defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
		gzipReader.Reset(r.Request.Body)
		r.Request.Body = gzipReader
	} else if ENCODING_DEFLATE == contentEncoding {
		zlibReader, err := zlib.NewReader(r.Request.Body)
		if err != nil {
			return err
		}
		r.Request.Body = zlibReader
	}

	// lookup the EntityReader, use defaultRequestContentType if needed and provided
	entityReader, ok := entityAccessRegistry.accessorAt(contentType)
	if !ok {
		if len(defaultRequestContentType) != 0 {
			entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
		}
		if !ok {
			return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
		}
	}
	return entityReader.Read(r, entityPointer)
}

// SetAttribute adds or replaces the attribute with the given value.
func (r *Request) SetAttribute(name string, value interface{}) {
	r.attributes[name] = value
}

// Attribute returns the value associated to the given name. Returns nil if absent.
func (r Request) Attribute(name string) interface{} {
	return r.attributes[name]
}

// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
// If no route was matched then return an empty string.
func (r Request) SelectedRoutePath() string {
	if r.selectedRoute == nil {
		return ""
	}
	// skip creating an accessor
	return r.selectedRoute.Path
}

// SelectedRoute returns a reader to access the selected Route by the container
// Returns nil if no route was matched.
func (r Request) SelectedRoute() RouteReader {
	if r.selectedRoute == nil {
		return nil
	}
	return routeAccessor{route: r.selectedRoute}
}
256 client/vendor/github.com/emicklei/go-restful/v3/response.go generated vendored Normal file
@@ -0,0 +1,256 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bufio"
	"errors"
	"net"
	"net/http"
)

// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string

//PrettyPrintResponses controls the indentation feature of XML and JSON serialization
var PrettyPrintResponses = true

// Response is a wrapper on the actual http ResponseWriter
// It provides several convenience methods to prepare and write response content.
type Response struct {
	http.ResponseWriter
	requestAccept string        // mime-type what the Http Request says it wants to receive
	routeProduces []string      // mime-types what the Route says it can produce
	statusCode    int           // HTTP status code that has been written explicitly (if zero then net/http has written 200)
	contentLength int           // number of bytes written for the response body
	prettyPrint   bool          // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
	err           error         // err property is kept when WriteError is called
	hijacker      http.Hijacker // if underlying ResponseWriter supports it
}

// NewResponse creates a new response based on a http ResponseWriter.
func NewResponse(httpWriter http.ResponseWriter) *Response {
	hijacker, _ := httpWriter.(http.Hijacker)
	return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker}
}

// DefaultResponseContentType set a default.
// If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultResponseContentType(restful.MIME_JSON)
func DefaultResponseContentType(mime string) {
	DefaultResponseMimeType = mime
}

// InternalServerError writes the StatusInternalServerError header.
// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
func (r Response) InternalServerError() Response {
	r.WriteHeader(http.StatusInternalServerError)
	return r
}

// Hijack implements the http.Hijacker interface. This expands
// the Response to fulfill http.Hijacker if the underlying
// http.ResponseWriter supports it.
func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	if r.hijacker == nil {
		return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter")
	}
	return r.hijacker.Hijack()
}

// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
func (r *Response) PrettyPrint(bePretty bool) {
	r.prettyPrint = bePretty
}

// AddHeader is a shortcut for .Header().Add(header,value)
func (r Response) AddHeader(header string, value string) Response {
	r.Header().Add(header, value)
	return r
}

// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
func (r *Response) SetRequestAccepts(mime string) {
	r.requestAccept = mime
}

// EntityWriter returns the registered EntityWriter that the entity (requested resource)
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
	sorted := sortedMimes(r.requestAccept)
	for _, eachAccept := range sorted {
		for _, eachProduce := range r.routeProduces {
			if eachProduce == eachAccept.media {
				if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
					return w, true
				}
			}
		}
		if eachAccept.media == "*/*" {
			for _, each := range r.routeProduces {
				if w, ok := entityAccessRegistry.accessorAt(each); ok {
					return w, true
				}
			}
		}
	}
	// if requestAccept is empty
	writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
	if !ok {
		// if not registered then fallback to the defaults (if set)
		if DefaultResponseMimeType == MIME_JSON {
			return entityAccessRegistry.accessorAt(MIME_JSON)
		}
		if DefaultResponseMimeType == MIME_XML {
			return entityAccessRegistry.accessorAt(MIME_XML)
		}
		// Fallback to whatever the route says it can produce.
		// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
		for _, each := range r.routeProduces {
			if w, ok := entityAccessRegistry.accessorAt(each); ok {
				return w, true
			}
		}
		if trace {
			traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
		}
	}
	return writer, ok
}

// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
func (r *Response) WriteEntity(value interface{}) error {
	return r.WriteHeaderAndEntity(http.StatusOK, value)
}

// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
// Current implementation ignores any q-parameters in the Accept Header.
// Returns an error if the value could not be written on the response.
func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
	writer, ok := r.EntityWriter()
	if !ok {
		r.WriteHeader(http.StatusNotAcceptable)
		return nil
	}
	return writer.Write(r, status, value)
}

// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsXml(value interface{}) error {
	return writeXML(r, http.StatusOK, MIME_XML, value)
}

// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
	return writeXML(r, status, MIME_XML, value)
}

// WriteAsJson is a convenience method for writing a value in json.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsJson(value interface{}) error {
	return writeJSON(r, http.StatusOK, MIME_JSON, value)
}

// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteJson(value interface{}, contentType string) error {
	return writeJSON(r, http.StatusOK, contentType, value)
}

// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
	return writeJSON(r, status, contentType, value)
}

// WriteError writes the http status and the error string on the response. err can be nil.
// Return an error if writing was not successful.
func (r *Response) WriteError(httpStatus int, err error) (writeErr error) {
	r.err = err
	if err == nil {
		writeErr = r.WriteErrorString(httpStatus, "")
	} else {
		writeErr = r.WriteErrorString(httpStatus, err.Error())
	}
	return writeErr
}

// WriteServiceError is a convenience method for a responding with a status and a ServiceError
func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
	r.err = err
	return r.WriteHeaderAndEntity(httpStatus, err)
}

// WriteErrorString is a convenience method for an error status with the actual error
func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
	if r.err == nil {
		// if not called from WriteError
		r.err = errors.New(errorReason)
	}
	r.WriteHeader(httpStatus)
	if _, err := r.Write([]byte(errorReason)); err != nil {
		return err
	}
	return nil
}

// Flush implements http.Flusher interface, which sends any buffered data to the client.
func (r *Response) Flush() {
	if f, ok := r.ResponseWriter.(http.Flusher); ok {
		f.Flush()
	} else if trace {
		traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
	}
}

// WriteHeader is overridden to remember the Status Code that has been written.
// Changes to the Header of the response have no effect after this.
func (r *Response) WriteHeader(httpStatus int) {
	r.statusCode = httpStatus
	r.ResponseWriter.WriteHeader(httpStatus)
}

// StatusCode returns the code that has been written using WriteHeader.
func (r Response) StatusCode() int {
	if 0 == r.statusCode {
		// no status code has been written yet; assume OK
		return http.StatusOK
	}
	return r.statusCode
}

// Write writes the data to the connection as part of an HTTP reply.
// Write is part of http.ResponseWriter interface.
func (r *Response) Write(bytes []byte) (int, error) {
	written, err := r.ResponseWriter.Write(bytes)
	r.contentLength += written
	return written, err
}

// ContentLength returns the number of bytes written for the response content.
// Note that this value is only correct if all data is written through the Response using its Write* methods.
// Data written directly using the underlying http.ResponseWriter is not accounted for.
func (r Response) ContentLength() int {
	return r.contentLength
}

// CloseNotify is part of http.CloseNotifier interface
func (r Response) CloseNotify() <-chan bool {
	return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

// Error returns the err created by WriteError
func (r Response) Error() error {
	return r.err
}
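For orientation, the Request and Response wrappers above are normally used together inside a RouteFunction. A minimal illustrative handler sketch follows; the User type, field names, and status codes are assumptions for illustration, not part of the vendored code:

	type User struct {
		Name string `json:"name"`
	}

	func createUser(req *restful.Request, resp *restful.Response) {
		u := new(User)
		// ReadEntity decodes the body according to Content-Type (JSON/XML)
		if err := req.ReadEntity(u); err != nil {
			resp.WriteError(http.StatusBadRequest, err)
			return
		}
		// WriteHeaderAndEntity encodes using the negotiated EntityWriter
		resp.WriteHeaderAndEntity(http.StatusCreated, u)
	}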
178 client/vendor/github.com/emicklei/go-restful/v3/route.go generated vendored Normal file
@@ -0,0 +1,178 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"net/http"
	"strings"
)

// RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response)

// RouteSelectionConditionFunction declares the signature of a function that
// can be used to add extra conditional logic when selecting whether the route
// matches the HTTP request.
type RouteSelectionConditionFunction func(httpRequest *http.Request) bool

// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
type Route struct {
	ExtensionProperties
	Method   string
	Produces []string
	Consumes []string
	Path     string // webservice root path + described path
	Function RouteFunction
	Filters  []FilterFunction
	If       []RouteSelectionConditionFunction

	// cached values for dispatching
	relativePath string
	pathParts    []string
	pathExpr     *pathExpression // cached compilation of relativePath as RegExp

	// documentation
	Doc                     string
	Notes                   string
	Operation               string
	ParameterDocs           []*Parameter
	ResponseErrors          map[int]ResponseError
	DefaultResponse         *ResponseError
	ReadSample, WriteSample interface{} // structs that model an example request or response payload

	// Extra information used to store custom information about the route.
	Metadata map[string]interface{}

	// marks a route as deprecated
	Deprecated bool

	//Overrides the container.contentEncodingEnabled
	contentEncodingEnabled *bool

	// indicate route path has custom verb
	hasCustomVerb bool

	// if a request does not include a content-type header then
	// depending on the method, it may return a 415 Unsupported Media
	// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,...
	allowedMethodsWithoutContentType []string
}

// Initialize for Route
func (r *Route) postBuild() {
	r.pathParts = tokenizePath(r.Path)
	r.hasCustomVerb = hasCustomVerb(r.Path)
}

// Create Request and Response from their http versions
func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) {
	wrappedRequest := NewRequest(httpRequest)
	wrappedRequest.pathParameters = pathParams
	wrappedRequest.selectedRoute = r
	wrappedResponse := NewResponse(httpWriter)
	wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
	wrappedResponse.routeProduces = r.Produces
	return wrappedRequest, wrappedResponse
}

func stringTrimSpaceCutset(r rune) bool {
	return r == ' '
}

// Return whether the mimeType matches to what this Route can produce.
func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
	remaining := mimeTypesWithQuality
	for {
		var mimeType string
		if end := strings.Index(remaining, ","); end == -1 {
			mimeType, remaining = remaining, ""
		} else {
			mimeType, remaining = remaining[:end], remaining[end+1:]
		}
		if quality := strings.Index(mimeType, ";"); quality != -1 {
			mimeType = mimeType[:quality]
		}
		mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
		if mimeType == "*/*" {
			return true
		}
		for _, producibleType := range r.Produces {
			if producibleType == "*/*" || producibleType == mimeType {
				return true
			}
		}
		if len(remaining) == 0 {
			return false
		}
	}
}

// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
func (r Route) matchesContentType(mimeTypes string) bool {

	if len(r.Consumes) == 0 {
		// did not specify what it can consume ; any media type (“*/*”) is assumed
		return true
	}

	if len(mimeTypes) == 0 {
		// idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
		m := r.Method
		// if route specifies less or non-idempotent methods then use that
		if len(r.allowedMethodsWithoutContentType) > 0 {
			for _, each := range r.allowedMethodsWithoutContentType {
				if m == each {
					return true
				}
			}
		} else {
			if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
				return true
			}
		}
		// proceed with default
		mimeTypes = MIME_OCTET
	}

	remaining := mimeTypes
	for {
		var mimeType string
		if end := strings.Index(remaining, ","); end == -1 {
			mimeType, remaining = remaining, ""
		} else {
			mimeType, remaining = remaining[:end], remaining[end+1:]
		}
		if quality := strings.Index(mimeType, ";"); quality != -1 {
			mimeType = mimeType[:quality]
		}
		mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
		for _, consumeableType := range r.Consumes {
			if consumeableType == "*/*" || consumeableType == mimeType {
				return true
			}
		}
		if len(remaining) == 0 {
			return false
		}
	}
}

// Tokenize an URL path using the slash separator ; the result does not have empty tokens
func tokenizePath(path string) []string {
	if "/" == path {
		return nil
	}
	return strings.Split(strings.Trim(path, "/"), "/")
}

// for debugging
func (r *Route) String() string {
	return r.Method + " " + r.Path
}

// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value.
func (r *Route) EnableContentEncoding(enabled bool) {
	r.contentEncodingEnabled = &enabled
}
376 client/vendor/github.com/emicklei/go-restful/v3/route_builder.go generated vendored Normal file
@@ -0,0 +1,376 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"fmt"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync/atomic"

	"github.com/emicklei/go-restful/v3/log"
)

// RouteBuilder is a helper to construct Routes.
type RouteBuilder struct {
	rootPath                         string
	currentPath                      string
	produces                         []string
	consumes                         []string
	httpMethod                       string        // required
	function                         RouteFunction // required
	filters                          []FilterFunction
	conditions                       []RouteSelectionConditionFunction
	allowedMethodsWithoutContentType []string // see Route

	typeNameHandleFunc TypeNameHandleFunction // required

	// documentation
	doc                     string
	notes                   string
	operation               string
	readSample, writeSample interface{}
	parameters              []*Parameter
	errorMap                map[int]ResponseError
	defaultResponse         *ResponseError
	metadata                map[string]interface{}
	extensions              map[string]interface{}
	deprecated              bool
	contentEncodingEnabled  *bool
}

// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// func Returns500(b *RouteBuilder) {
// 	b.Returns(500, "Internal Server Error", restful.ServiceError{})
// }
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
	for _, each := range oneArgBlocks {
		each(b)
	}
	return b
}

// To bind the route to a function.
// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
	b.function = function
	return b
}

// Method specifies what HTTP method to match. Required.
func (b *RouteBuilder) Method(method string) *RouteBuilder {
	b.httpMethod = method
	return b
}

// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
	b.produces = mimeTypes
	return b
}

// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these
func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
	b.consumes = mimeTypes
	return b
}

// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
	b.currentPath = subPath
	return b
}

// Doc tells what this route is all about. Optional.
func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
	b.doc = documentation
	return b
}

// Notes is a verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
	b.notes = notes
	return b
}

// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder {
	fn := b.typeNameHandleFunc
	if fn == nil {
		fn = reflectTypeName
	}
	typeAsName := fn(sample)
	description := ""
	if len(optionalDescription) > 0 {
		description = optionalDescription[0]
	}
	b.readSample = sample
	bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}}
	bodyParameter.beBody()
	bodyParameter.Required(true)
	bodyParameter.DataType(typeAsName)
	b.Param(bodyParameter)
	return b
}

// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not.
// Use this to modify or extend information for the Parameter (through its Data()).
func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
	for _, each := range b.parameters {
		if each.Data().Name == name {
			return each
		}
	}
	return p
}

// Writes tells what resource type will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
	b.writeSample = sample
	return b
}

// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
	if b.parameters == nil {
		b.parameters = []*Parameter{}
	}
	b.parameters = append(b.parameters, parameter)
	return b
}

// Operation allows you to document what the actual method/function call is of the Route.
// Unless called, the operation name is derived from the RouteFunction set using To(..).
func (b *RouteBuilder) Operation(name string) *RouteBuilder {
	b.operation = name
	return b
}

// ReturnsError is deprecated, use Returns instead.
func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
	log.Print("ReturnsError is deprecated, use Returns instead.")
	return b.Returns(code, message, model)
}

// Returns allows you to document what responses (errors or regular) can be expected.
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
	err := ResponseError{
		Code:      code,
		Message:   message,
		Model:     model,
		IsDefault: false, // this field is deprecated, use default response instead.
	}
	// lazy init because there is no NewRouteBuilder (yet)
	if b.errorMap == nil {
		b.errorMap = map[int]ResponseError{}
	}
	b.errorMap[code] = err
	return b
}

// ReturnsWithHeaders is similar to Returns, but can specify response headers
func (b *RouteBuilder) ReturnsWithHeaders(code int, message string, model interface{}, headers map[string]Header) *RouteBuilder {
	b.Returns(code, message, model)
	err := b.errorMap[code]
	err.Headers = headers
	b.errorMap[code] = err
	return b
}

// DefaultReturns is a special Returns call that sets the default of the response.
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
	b.defaultResponse = &ResponseError{
		Message: message,
		Model:   model,
	}
	return b
}

// Metadata adds or updates a key=value pair to the metadata map.
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
	if b.metadata == nil {
		b.metadata = map[string]interface{}{}
	}
	b.metadata[key] = value
	return b
}

// AddExtension adds or updates a key=value pair to the extensions map.
func (b *RouteBuilder) AddExtension(key string, value interface{}) *RouteBuilder {
	if b.extensions == nil {
		b.extensions = map[string]interface{}{}
	}
	b.extensions[key] = value
	return b
}

// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use
func (b *RouteBuilder) Deprecate() *RouteBuilder {
	b.deprecated = true
	return b
}

// AllowedMethodsWithoutContentType overrides the default list GET,HEAD,OPTIONS,DELETE,TRACE
// If a request does not include a content-type header then
// depending on the method, it may return a 415 Unsupported Media.
// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,...
func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *RouteBuilder {
	b.allowedMethodsWithoutContentType = methods
	return b
}

// ResponseError represents a response; not necessarily an error.
type ResponseError struct {
	ExtensionProperties
	Code      int
	Message   string
	Model     interface{}
	Headers   map[string]Header
	IsDefault bool
}

// Header describes a header for a response of the API
//
// For more information: http://goo.gl/8us55a#headerObject
type Header struct {
	*Items
	Description string
}

// Items describe swagger simple schemas for headers
type Items struct {
	Type             string
	Format           string
	Items            *Items
	CollectionFormat string
	Default          interface{}
}

func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
	b.rootPath = path
	return b
}

// Filter appends a FilterFunction to the end of filters for this Route to build.
func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
	b.filters = append(b.filters, filter)
	return b
}

// If sets a condition function that controls matching the Route based on custom logic.
// The condition function is provided the HTTP request and should return true if the route
// should be considered.
//
// Efficiency note: the condition function is called before checking the method, produces, and
// consumes criteria, so that the correct HTTP status code can be returned.
//
// Lifecycle note: no filter functions have been called prior to calling the condition function,
// so the condition function should not depend on any context that might be set up by container
// or route filters.
func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder {
	b.conditions = append(b.conditions, condition)
	return b
}

// ContentEncodingEnabled allows you to override the Containers value for auto-compressing this route response.
func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder {
	b.contentEncodingEnabled = &enabled
	return b
}

// If no specific Route path then set to rootPath
// If no specific Produces then set to rootProduces
// If no specific Consumes then set to rootConsumes
func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
	if len(b.produces) == 0 {
		b.produces = rootProduces
	}
	if len(b.consumes) == 0 {
		b.consumes = rootConsumes
	}
}

// typeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions.
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
	b.typeNameHandleFunc = handler
	return b
}

// Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route {
	pathExpr, err := newPathExpression(b.currentPath)
	if err != nil {
		log.Printf("Invalid path:%s because:%v", b.currentPath, err)
		os.Exit(1)
	}
	if b.function == nil {
		log.Printf("No function specified for route:" + b.currentPath)
		os.Exit(1)
	}
	operationName := b.operation
	if len(operationName) == 0 && b.function != nil {
		// extract from definition
		operationName = nameOfFunction(b.function)
	}
	route := Route{
		Method:                           b.httpMethod,
		Path:                             concatPath(b.rootPath, b.currentPath),
		Produces:                         b.produces,
		Consumes:                         b.consumes,
		Function:                         b.function,
		Filters:                          b.filters,
		If:                               b.conditions,
		relativePath:                     b.currentPath,
		pathExpr:                         pathExpr,
		Doc:                              b.doc,
		Notes:                            b.notes,
		Operation:                        operationName,
		ParameterDocs:                    b.parameters,
		ResponseErrors:                   b.errorMap,
		DefaultResponse:                  b.defaultResponse,
		ReadSample:                       b.readSample,
		WriteSample:                      b.writeSample,
		Metadata:                         b.metadata,
		Deprecated:                       b.deprecated,
		contentEncodingEnabled:           b.contentEncodingEnabled,
		allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType,
	}
	route.Extensions = b.extensions
	route.postBuild()
	return route
}

func concatPath(path1, path2 string) string {
	return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
}

var anonymousFuncCount int32

// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {
	fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
	tokenized := strings.Split(fun.Name(), ".")
	last := tokenized[len(tokenized)-1]
	last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
	last = strings.TrimSuffix(last, ")-fm") // Go 1.5
	last = strings.TrimSuffix(last, "·fm")  // < Go 1.5
	last = strings.TrimSuffix(last, "-fm")  // Go 1.5
	if last == "func1" { // this could mean conflicts in API docs
		val := atomic.AddInt32(&anonymousFuncCount, 1)
		last = "func" + fmt.Sprintf("%d", val)
		atomic.StoreInt32(&anonymousFuncCount, val)
	}
	return last
}
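A short usage sketch of the Do/Returns combination documented on RouteBuilder above; deletePerson and the WebService value ws are assumed to exist and are not part of the vendored code:

	// Returns500 documents a 500 response; reusable across routes via Do.
	func Returns500(b *restful.RouteBuilder) {
		b.Returns(http.StatusInternalServerError, "Internal Server Error", restful.ServiceError{})
	}

	ws.Route(ws.DELETE("/{name}").To(deletePerson).
		Doc("delete a person by name").
		Param(ws.PathParameter("name", "identifier").DataType("string")).
		Do(Returns500))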
66 client/vendor/github.com/emicklei/go-restful/v3/route_reader.go generated vendored Normal file
@@ -0,0 +1,66 @@
package restful

// Copyright 2021 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

type RouteReader interface {
	Method() string
	Consumes() []string
	Path() string
	Doc() string
	Notes() string
	Operation() string
	ParameterDocs() []*Parameter
	// Returns a copy
	Metadata() map[string]interface{}
	Deprecated() bool
}

type routeAccessor struct {
	route *Route
}

func (r routeAccessor) Method() string {
	return r.route.Method
}
func (r routeAccessor) Consumes() []string {
	return r.route.Consumes[:]
}
func (r routeAccessor) Path() string {
	return r.route.Path
}
func (r routeAccessor) Doc() string {
	return r.route.Doc
}
func (r routeAccessor) Notes() string {
	return r.route.Notes
}
func (r routeAccessor) Operation() string {
	return r.route.Operation
}
func (r routeAccessor) ParameterDocs() []*Parameter {
	return r.route.ParameterDocs[:]
}

// Returns a copy
func (r routeAccessor) Metadata() map[string]interface{} {
	return copyMap(r.route.Metadata)
}
func (r routeAccessor) Deprecated() bool {
	return r.route.Deprecated
}

// https://stackoverflow.com/questions/23057785/how-to-copy-a-map
func copyMap(m map[string]interface{}) map[string]interface{} {
	cp := make(map[string]interface{})
	for k, v := range m {
		vm, ok := v.(map[string]interface{})
		if ok {
			cp[k] = copyMap(vm)
		} else {
			cp[k] = v
		}
	}
	return cp
}
20 client/vendor/github.com/emicklei/go-restful/v3/router.go generated vendored Normal file
@@ -0,0 +1,20 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import "net/http"

// A RouteSelector finds the best matching Route given the input HTTP Request
// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the
// path parameters after the route has been selected.
type RouteSelector interface {

	// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
	// It returns a selected Route and its containing WebService or an error indicating
	// a problem.
	SelectRoute(
		webServices []*WebService,
		httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
}
32 client/vendor/github.com/emicklei/go-restful/v3/service_error.go generated vendored Normal file
@@ -0,0 +1,32 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"fmt"
	"net/http"
)

// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request.
type ServiceError struct {
	Code    int
	Message string
	Header  http.Header
}

// NewError returns a ServiceError using the code and reason
func NewError(code int, message string) ServiceError {
	return ServiceError{Code: code, Message: message}
}

// NewErrorWithHeader returns a ServiceError using the code, reason and header
func NewErrorWithHeader(code int, message string, header http.Header) ServiceError {
	return ServiceError{Code: code, Message: message, Header: header}
}

// Error returns a text representation of the service error
func (s ServiceError) Error() string {
	return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
}
305 client/vendor/github.com/emicklei/go-restful/v3/web_service.go generated vendored Normal file
@@ -0,0 +1,305 @@
|
||||
package restful
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/emicklei/go-restful/v3/log"
|
||||
)
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
|
||||
type WebService struct {
|
||||
rootPath string
|
||||
pathExpr *pathExpression // cached compilation of rootPath as RegExp
|
||||
routes []Route
|
||||
produces []string
|
||||
consumes []string
|
||||
pathParameters []*Parameter
|
||||
filters []FilterFunction
|
||||
documentation string
|
||||
apiVersion string
|
||||
|
||||
typeNameHandleFunc TypeNameHandleFunction
|
||||
|
||||
dynamicRoutes bool
|
||||
|
||||
// protects 'routes' if dynamic routes are enabled
|
||||
routesLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (w *WebService) SetDynamicRoutes(enable bool) {
|
||||
w.dynamicRoutes = enable
|
||||
}
|
||||
|
||||
// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
|
||||
// into the restful documentation for the service.
|
||||
type TypeNameHandleFunction func(sample interface{}) string
|
||||
|
||||
// TypeNameHandler sets the function that will convert types to strings in the parameter
|
||||
// and model definitions. If not set, the web service will invoke
|
||||
// reflect.TypeOf(object).String().
|
||||
func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
|
||||
w.typeNameHandleFunc = handler
|
||||
return w
|
||||
}
|
||||
|
||||
// reflectTypeName is the default TypeNameHandleFunction and for a given object
|
||||
// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
|
||||
// the reflection API.
|
||||
func reflectTypeName(sample interface{}) string {
|
||||
return reflect.TypeOf(sample).String()
|
||||
}
|
||||
|
||||
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
|
||||
func (w *WebService) compilePathExpression() {
|
||||
compiled, err := newPathExpression(w.rootPath)
|
||||
if err != nil {
|
||||
log.Printf("invalid path:%s because:%v", w.rootPath, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
w.pathExpr = compiled
|
||||
}
|
||||
|
||||
// ApiVersion sets the API version for documentation purposes.
|
||||
func (w *WebService) ApiVersion(apiVersion string) *WebService {
|
||||
w.apiVersion = apiVersion
|
||||
return w
|
||||
}
|
||||
|
||||
// Version returns the API version for documentation purposes.
|
||||
func (w *WebService) Version() string { return w.apiVersion }
|
||||
|
||||
// Path specifies the root URL template path of the WebService.
|
||||
// All Routes will be relative to this path.
|
||||
func (w *WebService) Path(root string) *WebService {
|
||||
w.rootPath = root
|
||||
if len(w.rootPath) == 0 {
|
||||
w.rootPath = "/"
|
||||
}
|
||||
w.compilePathExpression()
|
||||
return w
|
||||
}
|
||||
|
||||
// Param adds a PathParameter to document parameters used in the root path.
|
||||
func (w *WebService) Param(parameter *Parameter) *WebService {
|
||||
if w.pathParameters == nil {
|
||||
w.pathParameters = []*Parameter{}
|
||||
}
|
||||
w.pathParameters = append(w.pathParameters, parameter)
|
||||
return w
|
||||
}
|
||||
|
||||
// PathParameter creates a new Parameter of kind Path for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func (w *WebService) PathParameter(name, description string) *Parameter {
|
||||
return PathParameter(name, description)
|
||||
}
|
||||
|
||||
// PathParameter creates a new Parameter of kind Path for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func PathParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
|
||||
p.bePath()
|
||||
return p
|
||||
}
|
||||
|
||||
// QueryParameter creates a new Parameter of kind Query for documentation purposes.
|
||||
// It is initialized as not required with string as its DataType.
|
||||
func (w *WebService) QueryParameter(name, description string) *Parameter {
|
||||
return QueryParameter(name, description)
|
||||
}
|
||||
|
||||
// QueryParameter creates a new Parameter of kind Query for documentation purposes.
|
||||
// It is initialized as not required with string as its DataType.
|
||||
func QueryParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}}
|
||||
p.beQuery()
|
||||
return p
|
||||
}
|
||||
|
||||
// BodyParameter creates a new Parameter of kind Body for documentation purposes.
|
||||
// It is initialized as required without a DataType.
|
||||
func (w *WebService) BodyParameter(name, description string) *Parameter {
|
||||
return BodyParameter(name, description)
|
||||
}
|
||||
|
||||
// BodyParameter creates a new Parameter of kind Body for documentation purposes.
|
||||
// It is initialized as required without a DataType.
|
||||
func BodyParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
|
||||
p.beBody()
|
||||
return p
|
||||
}
|
||||
|
||||
// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
|
||||
// It is initialized as not required with string as its DataType.
|
||||
func (w *WebService) HeaderParameter(name, description string) *Parameter {
|
||||
return HeaderParameter(name, description)
|
||||
}
|
||||
|
||||
// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
|
||||
// It is initialized as not required with string as its DataType.
|
||||
func HeaderParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
|
||||
p.beHeader()
|
||||
return p
|
||||
}
|
||||
|
||||
// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func (w *WebService) FormParameter(name, description string) *Parameter {
|
||||
return FormParameter(name, description)
|
||||
}
|
||||
|
||||
// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func FormParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
|
||||
p.beForm()
|
||||
return p
|
||||
}
|
||||
|
||||
// MultiPartFormParameter creates a new Parameter of kind Form (using multipart/form-data) for documentation purposes.
|
||||
// It is initialized as required with string as its DataType.
|
||||
func (w *WebService) MultiPartFormParameter(name, description string) *Parameter {
|
||||
return MultiPartFormParameter(name, description)
|
||||
}
|
||||
|
||||
func MultiPartFormParameter(name, description string) *Parameter {
|
||||
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
|
||||
p.beMultiPartForm()
|
||||
return p
|
||||
}
|
||||
|
||||
// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes.
|
||||
func (w *WebService) Route(builder *RouteBuilder) *WebService {
|
||||
w.routesLock.Lock()
|
||||
defer w.routesLock.Unlock()
|
||||
builder.copyDefaults(w.produces, w.consumes)
|
||||
w.routes = append(w.routes, builder.Build())
|
||||
return w
|
||||
}
|
||||
|
||||
// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method'
|
||||
func (w *WebService) RemoveRoute(path, method string) error {
|
||||
if !w.dynamicRoutes {
|
||||
return errors.New("dynamic routes are not enabled.")
|
||||
}
|
||||
w.routesLock.Lock()
|
||||
defer w.routesLock.Unlock()
|
||||
newRoutes := []Route{}
|
||||
for _, route := range w.routes {
|
||||
if route.Method == method && route.Path == path {
|
||||
continue
|
||||
}
|
||||
newRoutes = append(newRoutes, route)
|
||||
}
|
||||
w.routes = newRoutes
|
||||
return nil
|
||||
}
|
||||
|
||||
// Method creates a new RouteBuilder and initialize its http method
|
||||
func (w *WebService) Method(httpMethod string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
|
||||
}
|
||||
|
||||
// Produces specifies that this WebService can produce one or more MIME types.
|
||||
// Http requests must have one of these values set for the Accept header.
|
||||
func (w *WebService) Produces(contentTypes ...string) *WebService {
|
||||
w.produces = contentTypes
|
||||
return w
|
||||
}
|
||||
|
||||
// Consumes specifies that this WebService can consume one or more MIME types.
|
||||
// Http requests must have one of these values set for the Content-Type header.
|
||||
func (w *WebService) Consumes(accepts ...string) *WebService {
|
||||
w.consumes = accepts
|
||||
return w
|
||||
}
|
||||
|
||||
// Routes returns the Routes associated with this WebService
|
||||
func (w *WebService) Routes() []Route {
|
||||
if !w.dynamicRoutes {
|
||||
return w.routes
|
||||
}
|
||||
// Make a copy of the array to prevent concurrency problems
|
||||
w.routesLock.RLock()
|
||||
defer w.routesLock.RUnlock()
|
||||
result := make([]Route, len(w.routes))
|
||||
for ix := range w.routes {
|
||||
result[ix] = w.routes[ix]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// RootPath returns the RootPath associated with this WebService. Default "/"
|
||||
func (w *WebService) RootPath() string {
|
||||
return w.rootPath
|
||||
}
|
||||
|
||||
// PathParameters return the path parameter names for (shared among its Routes)
|
||||
func (w *WebService) PathParameters() []*Parameter {
|
||||
return w.pathParameters
|
||||
}
|
||||
|
||||
// Filter adds a filter function to the chain of filters applicable to all its Routes
|
||||
func (w *WebService) Filter(filter FilterFunction) *WebService {
|
||||
w.filters = append(w.filters, filter)
|
||||
return w
|
||||
}
|
||||
|
||||
// Doc is used to set the documentation of this service.
|
||||
func (w *WebService) Doc(plainText string) *WebService {
|
||||
w.documentation = plainText
|
||||
return w
|
||||
}
|
||||
|
||||
// Documentation returns it.
|
||||
func (w *WebService) Documentation() string {
|
||||
return w.documentation
|
||||
}
|
||||
|
||||
/*
|
||||
Convenience methods
|
||||
*/
|
||||
|
||||
// HEAD is a shortcut for .Method("HEAD").Path(subPath)
|
||||
func (w *WebService) HEAD(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
|
||||
}
|
||||
|
||||
// GET is a shortcut for .Method("GET").Path(subPath)
|
||||
func (w *WebService) GET(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
|
||||
}
|
||||
|
||||
// POST is a shortcut for .Method("POST").Path(subPath)
|
||||
func (w *WebService) POST(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
|
||||
}
|
||||
|
||||
// PUT is a shortcut for .Method("PUT").Path(subPath)
|
||||
func (w *WebService) PUT(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
|
||||
}
|
||||
|
||||
// PATCH is a shortcut for .Method("PATCH").Path(subPath)
|
||||
func (w *WebService) PATCH(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
|
||||
}
|
||||
|
||||
// DELETE is a shortcut for .Method("DELETE").Path(subPath)
|
||||
func (w *WebService) DELETE(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
|
||||
}
|
||||
|
||||
// OPTIONS is a shortcut for .Method("OPTIONS").Path(subPath)
|
||||
func (w *WebService) OPTIONS(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("OPTIONS").Path(subPath)
|
||||
}
|
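Taken together, the builder methods above are normally chained when a service is declared. The sketch below only illustrates that pattern; the `/users` path, the `{id}` parameter and the handler are hypothetical, not part of this change:

```go
package main

import (
	restful "github.com/emicklei/go-restful/v3"
)

// newUserService shows how Produces, Consumes, Route and the GET shortcut
// from web_service.go are usually combined.
func newUserService() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)

	// GET is the shortcut for Method("GET").Path(subPath) documented above.
	ws.Route(ws.GET("/{id}").To(func(req *restful.Request, resp *restful.Response) {
		// Echo the path parameter back as JSON.
		resp.WriteEntity(map[string]string{"id": req.PathParameter("id")})
	}))
	return ws
}
```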
39
client/vendor/github.com/emicklei/go-restful/v3/web_service_container.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// DefaultContainer is a restful.Container that uses http.DefaultServeMux
|
||||
var DefaultContainer *Container
|
||||
|
||||
func init() {
|
||||
DefaultContainer = NewContainer()
|
||||
DefaultContainer.ServeMux = http.DefaultServeMux
|
||||
}
|
||||
|
||||
// If set to true then panics will not be caught to return HTTP 500.
|
||||
// In that case, Route functions are responsible for handling any error situation.
|
||||
// Default value is false = recover from panics. This has performance implications.
|
||||
// OBSOLETE; use restful.DefaultContainer.DoNotRecover(true)
|
||||
var DoNotRecover = false
|
||||
|
||||
// Add registers a new WebService and adds it to the DefaultContainer.
|
||||
func Add(service *WebService) {
|
||||
DefaultContainer.Add(service)
|
||||
}
|
||||
|
||||
// Filter appends a container FilterFunction to the DefaultContainer.
|
||||
// These are called before dispatching a http.Request to a WebService.
|
||||
func Filter(filter FilterFunction) {
|
||||
DefaultContainer.Filter(filter)
|
||||
}
|
||||
|
||||
// RegisteredWebServices returns the collection of WebServices from the DefaultContainer
|
||||
func RegisteredWebServices() []*WebService {
|
||||
return DefaultContainer.RegisteredWebServices()
|
||||
}
|
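Since DefaultContainer serves through http.DefaultServeMux, wiring a service into it only needs Add, an optional Filter and a plain ListenAndServe. A minimal sketch follows; the health route and the logging filter are illustrative assumptions:

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/healthz").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("ok"))
	}))

	// Filter runs before any request is dispatched to a registered WebService.
	restful.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
		log.Printf("%s %s", req.Request.Method, req.Request.URL)
		chain.ProcessFilter(req, resp)
	})

	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```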
6
client/vendor/github.com/evanphx/json-patch/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
# editor and IDE paraphernalia
|
||||
.idea
|
||||
.vscode
|
||||
|
||||
# macOS paraphernalia
|
||||
.DS_Store
|
25
client/vendor/github.com/evanphx/json-patch/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
Copyright (c) 2014, Evan Phoenix
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of the Evan Phoenix nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
317
client/vendor/github.com/evanphx/json-patch/README.md
generated
vendored
Normal file
@@ -0,0 +1,317 @@
|
||||
# JSON-Patch
|
||||
`jsonpatch` is a library which provides functionality for both applying
|
||||
[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
|
||||
well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
|
||||
|
||||
[GoDoc](http://godoc.org/github.com/evanphx/json-patch)
|
||||
[Build Status](https://travis-ci.org/evanphx/json-patch)
|
||||
[Go Report Card](https://goreportcard.com/report/github.com/evanphx/json-patch)
|
||||
|
||||
# Get It!
|
||||
|
||||
**Latest and greatest**:
|
||||
```bash
|
||||
go get -u github.com/evanphx/json-patch/v5
|
||||
```
|
||||
|
||||
**Stable Versions**:
|
||||
* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
|
||||
* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
|
||||
|
||||
(previous versions below `v3` are unavailable)
|
||||
|
||||
# Use It!
|
||||
* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
|
||||
* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
|
||||
* [Comparing JSON documents](#comparing-json-documents)
|
||||
* [Combine merge patches](#combine-merge-patches)
|
||||
|
||||
|
||||
# Configuration
|
||||
|
||||
* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
|
||||
This defaults to `true` and enables the non-standard practice of allowing
|
||||
negative indices to mean indices starting at the end of an array. This
|
||||
functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
|
||||
false`.
|
||||
|
||||
* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
|
||||
which limits the total size increase in bytes caused by "copy" operations in a
|
||||
patch. It defaults to 0, which means there is no limit.
|
||||
|
||||
These global variables control the behavior of `jsonpatch.Apply`.
|
||||
|
||||
An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior
|
||||
is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`.
|
||||
|
||||
Structure `jsonpatch.ApplyOptions` includes the configuration options above
|
||||
and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`.
|
||||
|
||||
When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore
|
||||
`remove` operations whose `path` points to a non-existent location in the JSON document.
|
||||
`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions`
|
||||
returning an error when hitting a missing `path` on `remove`.
|
||||
|
||||
When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
|
||||
that `add` operations produce all the `path` elements that are missing from the target object.
|
||||
|
||||
Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
|
||||
whose values are populated from the global configuration variables.
|
||||
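As a sketch of the options described above (this assumes the `github.com/evanphx/json-patch/v5` import path, where `NewApplyOptions` and `ApplyWithOptions` live; the document and patch are illustrative):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name": "John"}`)
	patchJSON := []byte(`[{"op": "remove", "path": "/height"}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	// Start from the global defaults, then tolerate removes on missing paths.
	opts := jsonpatch.NewApplyOptions()
	opts.AllowMissingPathOnRemove = true

	modified, err := patch.ApplyWithOptions(doc, opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("modified document: %s\n", modified)
}
```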
|
||||
## Create and apply a merge patch
|
||||
Given both an original JSON document and a modified JSON document, you can create
|
||||
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
|
||||
|
||||
It can describe the changes needed to convert from the original to the
|
||||
modified JSON document.
|
||||
|
||||
Once you have a merge patch, you can apply it to other JSON documents using the
|
||||
`jsonpatch.MergePatch(document, patch)` function.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Let's create a merge patch from these two documents...
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
target := []byte(`{"name": "Jane", "age": 24}`)
|
||||
|
||||
patch, err := jsonpatch.CreateMergePatch(original, target)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Now let's apply the patch against a different JSON document...
|
||||
|
||||
alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
|
||||
modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
|
||||
|
||||
fmt.Printf("patch document: %s\n", patch)
|
||||
fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
|
||||
}
|
||||
```
|
||||
|
||||
When run, you get the following output:
|
||||
|
||||
```bash
|
||||
$ go run main.go
|
||||
patch document: {"height":null,"name":"Jane"}
|
||||
updated alternative doc: {"age":28,"name":"Jane"}
|
||||
```
|
||||
|
||||
## Create and apply a JSON Patch
|
||||
You can create patch objects using `DecodePatch([]byte)`, which can then
|
||||
be applied against JSON documents.
|
||||
|
||||
The following is an example of creating a patch from two operations, and
|
||||
applying it against a JSON document.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
patchJSON := []byte(`[
|
||||
{"op": "replace", "path": "/name", "value": "Jane"},
|
||||
{"op": "remove", "path": "/height"}
|
||||
]`)
|
||||
|
||||
patch, err := jsonpatch.DecodePatch(patchJSON)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
modified, err := patch.Apply(original)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("Original document: %s\n", original)
|
||||
fmt.Printf("Modified document: %s\n", modified)
|
||||
}
|
||||
```
|
||||
|
||||
When run, you get the following output:
|
||||
|
||||
```bash
|
||||
$ go run main.go
|
||||
Original document: {"name": "John", "age": 24, "height": 3.21}
|
||||
Modified document: {"age":24,"name":"Jane"}
|
||||
```
|
||||
|
||||
## Comparing JSON documents
|
||||
Due to potential whitespace and ordering differences, one cannot simply compare
|
||||
JSON strings or byte-arrays directly.
|
||||
|
||||
As such, you can instead use `jsonpatch.Equal(document1, document2)` to
|
||||
determine if two JSON documents are _structurally_ equal. This ignores
|
||||
whitespace differences, and key-value ordering.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
similar := []byte(`
|
||||
{
|
||||
"age": 24,
|
||||
"height": 3.21,
|
||||
"name": "John"
|
||||
}
|
||||
`)
|
||||
different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
|
||||
|
||||
if jsonpatch.Equal(original, similar) {
|
||||
fmt.Println(`"original" is structurally equal to "similar"`)
|
||||
}
|
||||
|
||||
if !jsonpatch.Equal(original, different) {
|
||||
fmt.Println(`"original" is _not_ structurally equal to "different"`)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
When run, you get the following output:
|
||||
```bash
|
||||
$ go run main.go
|
||||
"original" is structurally equal to "similar"
|
||||
"original" is _not_ structurally equal to "different"
|
||||
```
|
||||
|
||||
## Combine merge patches
|
||||
Given two JSON merge patch documents, it is possible to combine them into a
|
||||
single merge patch which can describe both sets of changes.
|
||||
|
||||
The resulting merge patch can be used such that applying it results in a
|
||||
document structurally equivalent to merging each merge patch into the document
|
||||
in succession.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
|
||||
nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
|
||||
ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
|
||||
|
||||
// Let's combine these merge patch documents...
|
||||
combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Apply each patch individually against the original document
|
||||
withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Apply the combined patch against the original document
|
||||
|
||||
withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Do both result in the same thing? They should!
|
||||
if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
|
||||
fmt.Println("Both JSON documents are structurally the same!")
|
||||
}
|
||||
|
||||
fmt.Printf("combined merge patch: %s", combinedPatch)
|
||||
}
|
||||
```
|
||||
|
||||
When run, you get the following output:
|
||||
```bash
|
||||
$ go run main.go
|
||||
Both JSON documents are structurally the same!
|
||||
combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
|
||||
```
|
||||
|
||||
# CLI for comparing JSON documents
|
||||
You can install the commandline program `json-patch`.
|
||||
|
||||
This program can take multiple JSON patch documents as arguments,
|
||||
and is fed a JSON document from `stdin`. It will apply the patch(es) against
|
||||
the document and output the modified doc.
|
||||
|
||||
**patch.1.json**
|
||||
```json
|
||||
[
|
||||
{"op": "replace", "path": "/name", "value": "Jane"},
|
||||
{"op": "remove", "path": "/height"}
|
||||
]
|
||||
```
|
||||
|
||||
**patch.2.json**
|
||||
```json
|
||||
[
|
||||
{"op": "add", "path": "/address", "value": "123 Main St"},
|
||||
{"op": "replace", "path": "/age", "value": "21"}
|
||||
]
|
||||
```
|
||||
|
||||
**document.json**
|
||||
```json
|
||||
{
|
||||
"name": "John",
|
||||
"age": 24,
|
||||
"height": 3.21
|
||||
}
|
||||
```
|
||||
|
||||
You can then run:
|
||||
|
||||
```bash
|
||||
$ go install github.com/evanphx/json-patch/cmd/json-patch
|
||||
$ cat document.json | json-patch -p patch.1.json -p patch.2.json
|
||||
{"address":"123 Main St","age":"21","name":"Jane"}
|
||||
```
|
||||
|
||||
# Help It!
|
||||
Contributions are welcome! Leave [an issue](https://github.com/evanphx/json-patch/issues)
|
||||
or [create a PR](https://github.com/evanphx/json-patch/compare).
|
||||
|
||||
|
||||
Before creating a pull request, we'd ask that you make sure tests are passing
|
||||
and that you have added new tests when applicable.
|
||||
|
||||
Contributors can run tests using:
|
||||
|
||||
```bash
|
||||
go test -cover ./...
|
||||
```
|
||||
|
||||
Builds for pull requests are tested automatically
|
||||
using [TravisCI](https://travis-ci.org/evanphx/json-patch).
|
38
client/vendor/github.com/evanphx/json-patch/errors.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
package jsonpatch
|
||||
|
||||
import "fmt"
|
||||
|
||||
// AccumulatedCopySizeError is an error type returned when the accumulated size
|
||||
// increase caused by copy operations in a patch operation has exceeded the
|
||||
// limit.
|
||||
type AccumulatedCopySizeError struct {
|
||||
limit int64
|
||||
accumulated int64
|
||||
}
|
||||
|
||||
// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
|
||||
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
|
||||
return &AccumulatedCopySizeError{limit: l, accumulated: a}
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (a *AccumulatedCopySizeError) Error() string {
|
||||
return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
|
||||
}
|
||||
|
||||
// ArraySizeError is an error type returned when the array size has exceeded
|
||||
// the limit.
|
||||
type ArraySizeError struct {
|
||||
limit int
|
||||
size int
|
||||
}
|
||||
|
||||
// NewArraySizeError returns an ArraySizeError.
|
||||
func NewArraySizeError(l, s int) *ArraySizeError {
|
||||
return &ArraySizeError{limit: l, size: s}
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (a *ArraySizeError) Error() string {
|
||||
return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
|
||||
}
|
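A hedged sketch of how these error types surface: with a small `AccumulatedCopySizeLimit`, a `copy` operation that grows the document past the limit makes `Apply` return an `*AccumulatedCopySizeError` (the limit, document and patch below are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Cap the total size added by "copy" operations at 8 bytes.
	jsonpatch.AccumulatedCopySizeLimit = 8

	doc := []byte(`{"data": "0123456789abcdef"}`)
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "copy", "from": "/data", "path": "/copy"}]`))
	if err != nil {
		panic(err)
	}

	if _, err := patch.Apply(doc); err != nil {
		var sizeErr *jsonpatch.AccumulatedCopySizeError
		if errors.As(err, &sizeErr) {
			fmt.Println("copy limit exceeded:", err)
		}
	}
}
```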
389
client/vendor/github.com/evanphx/json-patch/merge.go
generated
vendored
Normal file
@@ -0,0 +1,389 @@
|
||||
package jsonpatch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
|
||||
curDoc, err := cur.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
pruneNulls(patch)
|
||||
return patch
|
||||
}
|
||||
|
||||
patchDoc, err := patch.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
return patch
|
||||
}
|
||||
|
||||
mergeDocs(curDoc, patchDoc, mergeMerge)
|
||||
|
||||
return cur
|
||||
}
|
||||
|
||||
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
|
||||
for k, v := range *patch {
|
||||
if v == nil {
|
||||
if mergeMerge {
|
||||
(*doc)[k] = nil
|
||||
} else {
|
||||
delete(*doc, k)
|
||||
}
|
||||
} else {
|
||||
cur, ok := (*doc)[k]
|
||||
|
||||
if !ok || cur == nil {
|
||||
if !mergeMerge {
|
||||
pruneNulls(v)
|
||||
}
|
||||
|
||||
(*doc)[k] = v
|
||||
} else {
|
||||
(*doc)[k] = merge(cur, v, mergeMerge)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneNulls(n *lazyNode) {
|
||||
sub, err := n.intoDoc()
|
||||
|
||||
if err == nil {
|
||||
pruneDocNulls(sub)
|
||||
} else {
|
||||
ary, err := n.intoAry()
|
||||
|
||||
if err == nil {
|
||||
pruneAryNulls(ary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneDocNulls(doc *partialDoc) *partialDoc {
|
||||
for k, v := range *doc {
|
||||
if v == nil {
|
||||
delete(*doc, k)
|
||||
} else {
|
||||
pruneNulls(v)
|
||||
}
|
||||
}
|
||||
|
||||
return doc
|
||||
}
|
||||
|
||||
func pruneAryNulls(ary *partialArray) *partialArray {
|
||||
newAry := []*lazyNode{}
|
||||
|
||||
for _, v := range *ary {
|
||||
if v != nil {
|
||||
pruneNulls(v)
|
||||
}
|
||||
newAry = append(newAry, v)
|
||||
}
|
||||
|
||||
*ary = newAry
|
||||
|
||||
return ary
|
||||
}
|
||||
|
||||
var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document")
|
||||
var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
|
||||
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
|
||||
|
||||
// MergeMergePatches merges two merge patches together, such that
|
||||
// applying this resulting merged merge patch to a document yields the same
|
||||
// as merging each merge patch to the document in succession.
|
||||
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
|
||||
return doMergePatch(patch1Data, patch2Data, true)
|
||||
}
|
||||
|
||||
// MergePatch merges the patchData into the docData.
|
||||
func MergePatch(docData, patchData []byte) ([]byte, error) {
|
||||
return doMergePatch(docData, patchData, false)
|
||||
}
|
||||
|
||||
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
|
||||
doc := &partialDoc{}
|
||||
|
||||
docErr := json.Unmarshal(docData, doc)
|
||||
|
||||
patch := &partialDoc{}
|
||||
|
||||
patchErr := json.Unmarshal(patchData, patch)
|
||||
|
||||
if _, ok := docErr.(*json.SyntaxError); ok {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
if _, ok := patchErr.(*json.SyntaxError); ok {
|
||||
return nil, ErrBadJSONPatch
|
||||
}
|
||||
|
||||
if docErr == nil && *doc == nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
if patchErr == nil && *patch == nil {
|
||||
return nil, ErrBadJSONPatch
|
||||
}
|
||||
|
||||
if docErr != nil || patchErr != nil {
|
||||
// Not an error, just not a doc, so we use the patch directly
|
||||
if patchErr == nil {
|
||||
if mergeMerge {
|
||||
doc = patch
|
||||
} else {
|
||||
doc = pruneDocNulls(patch)
|
||||
}
|
||||
} else {
|
||||
patchAry := &partialArray{}
|
||||
patchErr = json.Unmarshal(patchData, patchAry)
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, ErrBadJSONPatch
|
||||
}
|
||||
|
||||
pruneAryNulls(patchAry)
|
||||
|
||||
out, patchErr := json.Marshal(patchAry)
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, ErrBadJSONPatch
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
} else {
|
||||
mergeDocs(doc, patch, mergeMerge)
|
||||
}
|
||||
|
||||
return json.Marshal(doc)
|
||||
}
|
||||
|
||||
// resemblesJSONArray indicates whether the byte-slice "appears" to be
|
||||
// a JSON array or not.
|
||||
// False-positives are possible, as this function does not check the internal
|
||||
// structure of the array. It only checks that the outer syntax is present and
|
||||
// correct.
|
||||
func resemblesJSONArray(input []byte) bool {
|
||||
input = bytes.TrimSpace(input)
|
||||
|
||||
hasPrefix := bytes.HasPrefix(input, []byte("["))
|
||||
hasSuffix := bytes.HasSuffix(input, []byte("]"))
|
||||
|
||||
return hasPrefix && hasSuffix
|
||||
}
|
||||
|
||||
// CreateMergePatch will return a merge patch document capable of converting
|
||||
// the original document(s) to the modified document(s).
|
||||
// The parameters can be bytes of either two JSON Documents, or two arrays of
|
||||
// JSON documents.
|
||||
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
|
||||
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalResemblesArray := resemblesJSONArray(originalJSON)
|
||||
modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
|
||||
|
||||
// Do both byte-slices seem like JSON arrays?
|
||||
if originalResemblesArray && modifiedResemblesArray {
|
||||
return createArrayMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
// Are both byte-slices not arrays? Then they are likely JSON objects...
|
||||
if !originalResemblesArray && !modifiedResemblesArray {
|
||||
return createObjectMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
// None of the above? Then return an error because of mismatched types.
|
||||
return nil, errBadMergeTypes
|
||||
}
|
||||
|
||||
// createObjectMergePatch will return a merge-patch document capable of
|
||||
// converting the original document to the modified document.
|
||||
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalDoc := map[string]interface{}{}
|
||||
modifiedDoc := map[string]interface{}{}
|
||||
|
||||
err := json.Unmarshal(originalJSON, &originalDoc)
|
||||
if err != nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
|
||||
if err != nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
dest, err := getDiff(originalDoc, modifiedDoc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return json.Marshal(dest)
|
||||
}
|
||||
|
||||
// createArrayMergePatch will return an array of merge-patch documents capable
|
||||
// of converting the original document to the modified document for each
|
||||
// pair of JSON documents provided in the arrays.
|
||||
// Arrays of mismatched sizes will result in an error.
|
||||
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalDocs := []json.RawMessage{}
|
||||
modifiedDocs := []json.RawMessage{}
|
||||
|
||||
err := json.Unmarshal(originalJSON, &originalDocs)
|
||||
if err != nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
|
||||
if err != nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
total := len(originalDocs)
|
||||
if len(modifiedDocs) != total {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
result := []json.RawMessage{}
|
||||
for i := 0; i < len(originalDocs); i++ {
|
||||
original := originalDocs[i]
|
||||
modified := modifiedDocs[i]
|
||||
|
||||
patch, err := createObjectMergePatch(original, modified)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, json.RawMessage(patch))
|
||||
}
|
||||
|
||||
return json.Marshal(result)
|
||||
}
|
||||
|
||||
// Returns true if the arrays match (must be JSON types).
|
||||
// As is idiomatic for Go, an empty array is not the same as a nil array.
|
||||
func matchesArray(a, b []interface{}) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
if (a == nil && b != nil) || (a != nil && b == nil) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if !matchesValue(a[i], b[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the values match (must be JSON types).
|
||||
// The types of the values must match, otherwise it will always return false
|
||||
// If two map[string]interface{} are given, all elements must match.
|
||||
func matchesValue(av, bv interface{}) bool {
|
||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
||||
return false
|
||||
}
|
||||
switch at := av.(type) {
|
||||
case string:
|
||||
bt := bv.(string)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case float64:
|
||||
bt := bv.(float64)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case bool:
|
||||
bt := bv.(bool)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case nil:
|
||||
// Both nil, fine.
|
||||
return true
|
||||
case map[string]interface{}:
|
||||
bt := bv.(map[string]interface{})
|
||||
if len(bt) != len(at) {
|
||||
return false
|
||||
}
|
||||
for key := range bt {
|
||||
av, aOK := at[key]
|
||||
bv, bOK := bt[key]
|
||||
if aOK != bOK {
|
||||
return false
|
||||
}
|
||||
if !matchesValue(av, bv) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case []interface{}:
|
||||
bt := bv.([]interface{})
|
||||
return matchesArray(at, bt)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
|
||||
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
|
||||
into := map[string]interface{}{}
|
||||
for key, bv := range b {
|
||||
av, ok := a[key]
|
||||
// value was added
|
||||
if !ok {
|
||||
into[key] = bv
|
||||
continue
|
||||
}
|
||||
// If types have changed, replace completely
|
||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
||||
into[key] = bv
|
||||
continue
|
||||
}
|
||||
// Types are the same, compare values
|
||||
switch at := av.(type) {
|
||||
case map[string]interface{}:
|
||||
bt := bv.(map[string]interface{})
|
||||
dst := make(map[string]interface{}, len(bt))
|
||||
dst, err := getDiff(at, bt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) > 0 {
|
||||
into[key] = dst
|
||||
}
|
||||
case string, float64, bool:
|
||||
if !matchesValue(av, bv) {
|
||||
into[key] = bv
|
||||
}
|
||||
case []interface{}:
|
||||
bt := bv.([]interface{})
|
||||
if !matchesArray(at, bt) {
|
||||
into[key] = bv
|
||||
}
|
||||
case nil:
|
||||
switch bv.(type) {
|
||||
case nil:
|
||||
// Both nil, fine.
|
||||
default:
|
||||
into[key] = bv
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
|
||||
}
|
||||
}
|
||||
// Now add all deleted values as nil
|
||||
for key := range a {
|
||||
_, found := b[key]
|
||||
if !found {
|
||||
into[key] = nil
|
||||
}
|
||||
}
|
||||
return into, nil
|
||||
}
|
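The array form of CreateMergePatch mentioned in its comment pairs the documents element by element and returns one merge patch per pair; a short sketch (the documents are illustrative):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`[{"name": "John"}, {"name": "Sue"}]`)
	modified := []byte(`[{"name": "Jane"}, {"name": "Sue"}]`)

	// Both inputs resemble JSON arrays, so one merge patch is produced per element.
	patches, err := jsonpatch.CreateMergePatch(original, modified)
	if err != nil {
		panic(err)
	}
	fmt.Printf("merge patches: %s\n", patches)
	// Expected shape: [{"name":"Jane"},{}]
}
```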
851
client/vendor/github.com/evanphx/json-patch/patch.go
generated
vendored
Normal file
@@ -0,0 +1,851 @@
|
||||
package jsonpatch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
eRaw = iota
|
||||
eDoc
|
||||
eAry
|
||||
)
|
||||
|
||||
var (
|
||||
// SupportNegativeIndices decides whether to support non-standard practice of
|
||||
// allowing negative indices to mean indices starting at the end of an array.
|
||||
// Default to true.
|
||||
SupportNegativeIndices bool = true
|
||||
// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
|
||||
// "copy" operations in a patch.
|
||||
AccumulatedCopySizeLimit int64 = 0
|
||||
)
|
||||
|
||||
var (
|
||||
ErrTestFailed = errors.New("test failed")
|
||||
ErrMissing = errors.New("missing value")
|
||||
ErrUnknownType = errors.New("unknown object type")
|
||||
ErrInvalid = errors.New("invalid state detected")
|
||||
ErrInvalidIndex = errors.New("invalid index referenced")
|
||||
)
|
||||
|
||||
type lazyNode struct {
|
||||
raw *json.RawMessage
|
||||
doc partialDoc
|
||||
ary partialArray
|
||||
which int
|
||||
}
|
||||
|
||||
// Operation is a single JSON-Patch step, such as a single 'add' operation.
|
||||
type Operation map[string]*json.RawMessage
|
||||
|
||||
// Patch is an ordered collection of Operations.
|
||||
type Patch []Operation
|
||||
|
||||
type partialDoc map[string]*lazyNode
|
||||
type partialArray []*lazyNode
|
||||
|
||||
type container interface {
|
||||
get(key string) (*lazyNode, error)
|
||||
set(key string, val *lazyNode) error
|
||||
add(key string, val *lazyNode) error
|
||||
remove(key string) error
|
||||
}
|
||||
|
||||
func newLazyNode(raw *json.RawMessage) *lazyNode {
|
||||
return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
|
||||
}
|
||||
|
||||
func (n *lazyNode) MarshalJSON() ([]byte, error) {
|
||||
switch n.which {
|
||||
case eRaw:
|
||||
return json.Marshal(n.raw)
|
||||
case eDoc:
|
||||
return json.Marshal(n.doc)
|
||||
case eAry:
|
||||
return json.Marshal(n.ary)
|
||||
default:
|
||||
return nil, ErrUnknownType
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lazyNode) UnmarshalJSON(data []byte) error {
|
||||
dest := make(json.RawMessage, len(data))
|
||||
copy(dest, data)
|
||||
n.raw = &dest
|
||||
n.which = eRaw
|
||||
return nil
|
||||
}
|
||||
|
||||
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
|
||||
if src == nil {
|
||||
return nil, 0, nil
|
||||
}
|
||||
a, err := src.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
sz := len(a)
|
||||
ra := make(json.RawMessage, sz)
|
||||
copy(ra, a)
|
||||
return newLazyNode(&ra), sz, nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) intoDoc() (*partialDoc, error) {
|
||||
if n.which == eDoc {
|
||||
return &n.doc, nil
|
||||
}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil, ErrInvalid
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.doc)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n.which = eDoc
|
||||
return &n.doc, nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) intoAry() (*partialArray, error) {
|
||||
if n.which == eAry {
|
||||
return &n.ary, nil
|
||||
}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil, ErrInvalid
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.ary)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n.which = eAry
|
||||
return &n.ary, nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) compact() []byte {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := json.Compact(buf, *n.raw)
|
||||
|
||||
if err != nil {
|
||||
return *n.raw
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (n *lazyNode) tryDoc() bool {
|
||||
if n.raw == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.doc)
|
||||
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n.which = eDoc
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lazyNode) tryAry() bool {
|
||||
if n.raw == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.ary)
|
||||
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n.which = eAry
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lazyNode) equal(o *lazyNode) bool {
|
||||
if n.which == eRaw {
|
||||
if !n.tryDoc() && !n.tryAry() {
|
||||
if o.which != eRaw {
|
||||
return false
|
||||
}
|
||||
|
||||
return bytes.Equal(n.compact(), o.compact())
|
||||
}
|
||||
}
|
||||
|
||||
if n.which == eDoc {
|
||||
if o.which == eRaw {
|
||||
if !o.tryDoc() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if o.which != eDoc {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(n.doc) != len(o.doc) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, v := range n.doc {
|
||||
ov, ok := o.doc[k]
|
||||
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if (v == nil) != (ov == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
if v == nil && ov == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !v.equal(ov) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
if o.which != eAry && !o.tryAry() {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(n.ary) != len(o.ary) {
|
||||
return false
|
||||
}
|
||||
|
||||
for idx, val := range n.ary {
|
||||
if !val.equal(o.ary[idx]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Kind reads the "op" field of the Operation.
|
||||
func (o Operation) Kind() string {
|
||||
if obj, ok := o["op"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
return op
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// Path reads the "path" field of the Operation.
|
||||
func (o Operation) Path() (string, error) {
|
||||
if obj, ok := o["path"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown", err
|
||||
}
|
||||
|
||||
return op, nil
|
||||
}
|
||||
|
||||
return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
|
||||
}
|
||||
|
||||
// From reads the "from" field of the Operation.
|
||||
func (o Operation) From() (string, error) {
|
||||
if obj, ok := o["from"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown", err
|
||||
}
|
||||
|
||||
return op, nil
|
||||
}
|
||||
|
||||
return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
|
||||
}
|
||||
|
||||
func (o Operation) value() *lazyNode {
|
||||
if obj, ok := o["value"]; ok {
|
||||
return newLazyNode(obj)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValueInterface decodes the operation value into an interface.
|
||||
func (o Operation) ValueInterface() (interface{}, error) {
|
||||
if obj, ok := o["value"]; ok && obj != nil {
|
||||
var v interface{}
|
||||
|
||||
err := json.Unmarshal(*obj, &v)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
|
||||
}
|
||||
|
||||
func isArray(buf []byte) bool {
|
||||
Loop:
|
||||
for _, c := range buf {
|
||||
switch c {
|
||||
case ' ':
|
||||
case '\n':
|
||||
case '\t':
|
||||
continue
|
||||
case '[':
|
||||
return true
|
||||
default:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func findObject(pd *container, path string) (container, string) {
|
||||
doc := *pd
|
||||
|
||||
split := strings.Split(path, "/")
|
||||
|
||||
if len(split) < 2 {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
parts := split[1 : len(split)-1]
|
||||
|
||||
key := split[len(split)-1]
|
||||
|
||||
var err error
|
||||
|
||||
for _, part := range parts {
|
||||
|
||||
next, ok := doc.get(decodePatchKey(part))
|
||||
|
||||
if next == nil || ok != nil {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
if isArray(*next.raw) {
|
||||
doc, err = next.intoAry()
|
||||
|
||||
if err != nil {
|
||||
return nil, ""
|
||||
}
|
||||
} else {
|
||||
doc, err = next.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
return nil, ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return doc, decodePatchKey(key)
|
||||
}
|
||||
|
||||
func (d *partialDoc) set(key string, val *lazyNode) error {
|
||||
(*d)[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) add(key string, val *lazyNode) error {
|
||||
(*d)[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) get(key string) (*lazyNode, error) {
|
||||
return (*d)[key], nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) remove(key string) error {
|
||||
_, ok := (*d)[key]
|
||||
if !ok {
|
||||
return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
|
||||
}
|
||||
|
||||
delete(*d, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// set should only be used to implement the "replace" operation, so "key" must
|
||||
// be an already existing index in "d".
|
||||
func (d *partialArray) set(key string, val *lazyNode) error {
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(*d) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(*d)
|
||||
}
|
||||
|
||||
(*d)[idx] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialArray) add(key string, val *lazyNode) error {
|
||||
if key == "-" {
|
||||
*d = append(*d, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
|
||||
}
|
||||
|
||||
sz := len(*d) + 1
|
||||
|
||||
ary := make([]*lazyNode, sz)
|
||||
|
||||
cur := *d
|
||||
|
||||
if idx >= len(ary) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(ary) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(ary)
|
||||
}
|
||||
|
||||
copy(ary[0:idx], cur[0:idx])
|
||||
ary[idx] = val
|
||||
copy(ary[idx+1:], cur[idx:])
|
||||
|
||||
*d = ary
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialArray) get(key string) (*lazyNode, error) {
|
||||
idx, err := strconv.Atoi(key)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(*d) {
|
||||
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(*d)
|
||||
}
|
||||
|
||||
if idx >= len(*d) {
|
||||
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
return (*d)[idx], nil
|
||||
}
|
||||
|
||||
func (d *partialArray) remove(key string) error {
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cur := *d
|
||||
|
||||
if idx >= len(cur) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(cur) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(cur)
|
||||
}
|
||||
|
||||
ary := make([]*lazyNode, len(cur)-1)
|
||||
|
||||
copy(ary[0:idx], cur[0:idx])
|
||||
copy(ary[idx:], cur[idx+1:])
|
||||
|
||||
*d = ary
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (p Patch) add(doc *container, op Operation) error {
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(ErrMissing, "add operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
|
||||
}
|
||||
|
||||
err = con.add(key, op.value())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in add for path: '%s'", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Patch) remove(doc *container, op Operation) error {
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
|
||||
}
|
||||
|
||||
err = con.remove(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in remove for path: '%s'", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Patch) replace(doc *container, op Operation) error {
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "replace operation failed to decode path")
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
val := op.value()
|
||||
|
||||
if val.which == eRaw {
|
||||
if !val.tryDoc() {
|
||||
if !val.tryAry() {
|
||||
return errors.Wrapf(err, "replace operation value must be object or array")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch val.which {
|
||||
case eAry:
|
||||
*doc = &val.ary
|
||||
case eDoc:
|
||||
*doc = &val.doc
|
||||
case eRaw:
|
||||
return errors.Wrapf(err, "replace operation hit impossible case")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
|
||||
}
|
||||
|
||||
_, ok := con.get(key)
|
||||
if ok != nil {
|
||||
return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
|
||||
}
|
||||
|
||||
err = con.set(key, op.value())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in remove for path: '%s'", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Patch) move(doc *container, op Operation) error {
|
||||
from, err := op.From()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "move operation failed to decode from")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, from)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in move for path: '%s'", key)
|
||||
}
|
||||
|
||||
err = con.remove(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in move for path: '%s'", key)
|
||||
}
|
||||
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "move operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key = findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
|
||||
}
|
||||
|
||||
err = con.add(key, val)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in move for path: '%s'", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Patch) test(doc *container, op Operation) error {
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "test operation failed to decode path")
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
var self lazyNode
|
||||
|
||||
switch sv := (*doc).(type) {
|
||||
case *partialDoc:
|
||||
self.doc = *sv
|
||||
self.which = eDoc
|
||||
case *partialArray:
|
||||
self.ary = *sv
|
||||
self.which = eAry
|
||||
}
|
||||
|
||||
if self.equal(op.value()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
|
||||
}
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in test for path: '%s'", path)
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
if op.value().raw == nil {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
|
||||
} else if op.value() == nil {
|
||||
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
|
||||
}
|
||||
|
||||
if val.equal(op.value()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
|
||||
}
|
||||
|
||||
func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
|
||||
from, err := op.From()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "copy operation failed to decode from")
|
||||
}
|
||||
|
||||
con, key := findObject(doc, from)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in copy for from: '%s'", from)
|
||||
}
|
||||
|
||||
path, err := op.Path()
|
||||
if err != nil {
|
||||
return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
|
||||
}
|
||||
|
||||
con, key = findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
|
||||
}
|
||||
|
||||
valCopy, sz, err := deepCopy(val)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error while performing deep copy")
|
||||
}
|
||||
|
||||
(*accumulatedCopySize) += int64(sz)
|
||||
if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
|
||||
return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
|
||||
}
|
||||
|
||||
err = con.add(key, valCopy)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error while adding value during copy")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Equal indicates if 2 JSON documents have the same structural equality.
|
||||
func Equal(a, b []byte) bool {
|
||||
ra := make(json.RawMessage, len(a))
|
||||
copy(ra, a)
|
||||
la := newLazyNode(&ra)
|
||||
|
||||
rb := make(json.RawMessage, len(b))
|
||||
copy(rb, b)
|
||||
lb := newLazyNode(&rb)
|
||||
|
||||
return la.equal(lb)
|
||||
}
|
||||
|
||||
// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
|
||||
func DecodePatch(buf []byte) (Patch, error) {
|
||||
var p Patch
|
||||
|
||||
err := json.Unmarshal(buf, &p)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Apply mutates a JSON document according to the patch, and returns the new
|
||||
// document.
|
||||
func (p Patch) Apply(doc []byte) ([]byte, error) {
|
||||
return p.ApplyIndent(doc, "")
|
||||
}
|
||||
|
||||
// ApplyIndent mutates a JSON document according to the patch, and returns the new
|
||||
// document indented.
|
||||
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
|
||||
if len(doc) == 0 {
|
||||
return doc, nil
|
||||
}
|
||||
|
||||
var pd container
|
||||
if doc[0] == '[' {
|
||||
pd = &partialArray{}
|
||||
} else {
|
||||
pd = &partialDoc{}
|
||||
}
|
||||
|
||||
err := json.Unmarshal(doc, pd)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = nil
|
||||
|
||||
var accumulatedCopySize int64
|
||||
|
||||
for _, op := range p {
|
||||
switch op.Kind() {
|
||||
case "add":
|
||||
err = p.add(&pd, op)
|
||||
case "remove":
|
||||
err = p.remove(&pd, op)
|
||||
case "replace":
|
||||
err = p.replace(&pd, op)
|
||||
case "move":
|
||||
err = p.move(&pd, op)
|
||||
case "test":
|
||||
err = p.test(&pd, op)
|
||||
case "copy":
|
||||
err = p.copy(&pd, op, &accumulatedCopySize)
|
||||
default:
|
||||
err = fmt.Errorf("Unexpected kind: %s", op.Kind())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if indent != "" {
|
||||
return json.MarshalIndent(pd, "", indent)
|
||||
}
|
||||
|
||||
return json.Marshal(pd)
|
||||
}
|
||||
|
||||
// From http://tools.ietf.org/html/rfc6901#section-4 :
|
||||
//
|
||||
// Evaluation of each reference token begins by decoding any escaped
|
||||
// character sequence. This is performed by first transforming any
|
||||
// occurrence of the sequence '~1' to '/', and then transforming any
|
||||
// occurrence of the sequence '~0' to '~'.
|
||||
|
||||
var (
|
||||
rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
|
||||
)
|
||||
|
||||
func decodePatchKey(k string) string {
|
||||
return rfc6901Decoder.Replace(k)
|
||||
}
|
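Because of the RFC 6901 decoding above, a '/' inside an object key is written as '~1' (and '~' as '~0') in a patch path. A small sketch combining that with ApplyIndent (the document and patch are illustrative):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"foo/bar": 1}`)

	// "/foo~1bar" addresses the key "foo/bar" per RFC 6901 escaping.
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "replace", "path": "/foo~1bar", "value": 2}]`))
	if err != nil {
		panic(err)
	}

	modified, err := patch.ApplyIndent(doc, "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(modified))
}
```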
29
client/vendor/github.com/go-logr/logr/.golangci.yaml
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
run:
|
||||
timeout: 1m
|
||||
tests: true
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asciicheck
|
||||
- deadcode
|
||||
- errcheck
|
||||
- forcetypeassert
|
||||
- gocritic
|
||||
- gofmt
|
||||
- goimports
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- typecheck
|
||||
- unused
|
||||
- varcheck
|
||||
|
||||
issues:
|
||||
exclude-use-default: false
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 10
|
6
client/vendor/github.com/go-logr/logr/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
# CHANGELOG
|
||||
|
||||
## v1.0.0-rc1
|
||||
|
||||
This is the first logged release. Major changes (including breaking changes)
|
||||
have occurred since earlier tags.
|
17
client/vendor/github.com/go-logr/logr/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
# Contributing
|
||||
|
||||
Logr is open to pull-requests, provided they fit within the intended scope of
|
||||
the project. Specifically, this library aims to be VERY small and minimalist,
|
||||
with no external dependencies.
|
||||
|
||||
## Compatibility
|
||||
|
||||
This project intends to follow [semantic versioning](http://semver.org) and
|
||||
is very strict about compatibility. Any proposed changes MUST follow those
|
||||
rules.
|
||||
|
||||
## Performance
|
||||
|
||||
As a logging library, logr must be as light-weight as possible. Any proposed
|
||||
code change must include results of running the [benchmark](./benchmark)
|
||||
before and after the change.
|
201
client/vendor/github.com/go-logr/logr/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
282
client/vendor/github.com/go-logr/logr/README.md
generated
vendored
Normal file
282
client/vendor/github.com/go-logr/logr/README.md
generated
vendored
Normal file
@@ -0,0 +1,282 @@
|
||||
# A minimal logging API for Go
|
||||
|
||||
[](https://pkg.go.dev/github.com/go-logr/logr)
|
||||
|
||||
logr offers an(other) opinion on how Go programs and libraries can do logging
|
||||
without becoming coupled to a particular logging implementation. This is not
|
||||
an implementation of logging - it is an API. In fact it is two APIs with two
|
||||
different sets of users.
|
||||
|
||||
The `Logger` type is intended for application and library authors. It provides
|
||||
a relatively small API which can be used everywhere you want to emit logs. It
|
||||
defers the actual act of writing logs (to files, to stdout, or whatever) to the
|
||||
`LogSink` interface.
|
||||
|
||||
The `LogSink` interface is intended for logging library implementers. It is a
|
||||
pure interface which can be implemented by logging frameworks to provide the actual logging
|
||||
functionality.
|
||||
|
||||
This decoupling allows application and library developers to write code in
|
||||
terms of `logr.Logger` (which has very low dependency fan-out) while the
|
||||
implementation of logging is managed "up stack" (e.g. in or near `main()`.)
|
||||
Application developers can then switch out implementations as necessary.
|
||||
|
||||
Many people assert that libraries should not be logging, and as such efforts
|
||||
like this are pointless. Those people are welcome to convince the authors of
|
||||
the tens-of-thousands of libraries that *DO* write logs that they are all
|
||||
wrong. In the meantime, logr takes a more practical approach.
|
||||
|
||||
## Typical usage
|
||||
|
||||
Somewhere, early in an application's life, it will make a decision about which
|
||||
logging library (implementation) it actually wants to use. Something like:
|
||||
|
||||
```
|
||||
func main() {
|
||||
// ... other setup code ...
|
||||
|
||||
// Create the "root" logger. We have chosen the "logimpl" implementation,
|
||||
// which takes some initial parameters and returns a logr.Logger.
|
||||
logger := logimpl.New(param1, param2)
|
||||
|
||||
// ... other setup code ...
|
||||
```
|
||||
|
||||
Most apps will call into other libraries, create structures to govern the flow,
|
||||
etc. The `logr.Logger` object can be passed to these other libraries, stored
|
||||
in structs, or even used as a package-global variable, if needed. For example:
|
||||
|
||||
```
|
||||
app := createTheAppObject(logger)
|
||||
app.Run()
|
||||
```
|
||||
|
||||
Outside of this early setup, no other packages need to know about the choice of
|
||||
implementation. They write logs in terms of the `logr.Logger` that they
|
||||
received:
|
||||
|
||||
```
|
||||
type appObject struct {
|
||||
// ... other fields ...
|
||||
logger logr.Logger
|
||||
// ... other fields ...
|
||||
}
|
||||
|
||||
func (app *appObject) Run() {
|
||||
app.logger.Info("starting up", "timestamp", time.Now())
|
||||
|
||||
// ... app code ...
|
||||
```
|
||||
|
||||
## Background
|
||||
|
||||
If the Go standard library had defined an interface for logging, this project
|
||||
probably would not be needed. Alas, here we are.
|
||||
|
||||
### Inspiration
|
||||
|
||||
Before you consider this package, please read [this blog post by the
|
||||
inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
|
||||
he has to say, and it largely aligns with our own experiences.
|
||||
|
||||
### Differences from Dave's ideas
|
||||
|
||||
The main differences are:
|
||||
|
||||
1. Dave basically proposes doing away with the notion of a logging API in favor
|
||||
of `fmt.Printf()`. We disagree, especially when you consider things like output
|
||||
locations, timestamps, file and line decorations, and structured logging. This
|
||||
package restricts the logging API to just 2 types of logs: info and error.
|
||||
|
||||
Info logs are things you want to tell the user which are not errors. Error
|
||||
logs are, well, errors. If your code receives an `error` from a subordinate
|
||||
function call and is logging that `error` *and not returning it*, use error
|
||||
logs.
|
||||
|
||||
2. Verbosity-levels on info logs. This gives developers a chance to indicate
|
||||
arbitrary grades of importance for info logs, without assigning names with
|
||||
semantic meaning such as "warning", "trace", and "debug." Superficially this
|
||||
may feel very similar, but the primary difference is the lack of semantics.
|
||||
Because verbosity is a numerical value, it's safe to assume that an app running
|
||||
with higher verbosity means more (and less important) logs will be generated.
|
||||
|
||||
## Implementations (non-exhaustive)
|
||||
|
||||
There are implementations for the following logging libraries:
|
||||
|
||||
- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
|
||||
- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
|
||||
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
|
||||
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
|
||||
- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
|
||||
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
|
||||
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
|
||||
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
|
||||
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
|
||||
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
|
||||
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
|
||||
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
|
||||
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
|
||||
|
||||
## FAQ
|
||||
|
||||
### Conceptual
|
||||
|
||||
#### Why structured logging?
|
||||
|
||||
- **Structured logs are more easily queryable**: Since you've got
|
||||
key-value pairs, it's much easier to query your structured logs for
|
||||
particular values by filtering on the contents of a particular key --
|
||||
think searching request logs for error codes, Kubernetes reconcilers for
|
||||
the name and namespace of the reconciled object, etc.
|
||||
|
||||
- **Structured logging makes it easier to have cross-referenceable logs**:
|
||||
Similarly to searchability, if you maintain conventions around your
|
||||
keys, it becomes easy to gather all log lines related to a particular
|
||||
concept.
|
||||
|
||||
- **Structured logs allow better dimensions of filtering**: if you have
|
||||
structure to your logs, you've got more precise control over how much
|
||||
information is logged -- you might choose in a particular configuration
|
||||
to log certain keys but not others, only log lines where a certain key
|
||||
matches a certain value, etc., instead of just having v-levels and names
|
||||
to key off of.
|
||||
|
||||
- **Structured logs better represent structured data**: sometimes, the
|
||||
data that you want to log is inherently structured (think tuple-link
|
||||
objects.) Structured logs allow you to preserve that structure when
|
||||
outputting.
|
||||
|
||||
#### Why V-levels?
|
||||
|
||||
**V-levels give operators an easy way to control the chattiness of log
|
||||
operations**. V-levels provide a way for a given package to distinguish
|
||||
the relative importance or verbosity of a given log message. Then, if
|
||||
a particular logger or package is logging too many messages, the user
|
||||
of the package can simply change the v-levels for that library.
|
||||
|
||||
#### Why not named levels, like Info/Warning/Error?
|
||||
|
||||
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
|
||||
from Dave's ideas](#differences-from-daves-ideas).
|
||||
|
||||
#### Why not allow format strings, too?
|
||||
|
||||
**Format strings negate many of the benefits of structured logs**:
|
||||
|
||||
- They're not easily searchable without resorting to fuzzy searching,
|
||||
regular expressions, etc.
|
||||
|
||||
- They don't store structured data well, since contents are flattened into
|
||||
a string.
|
||||
|
||||
- They're not cross-referenceable.
|
||||
|
||||
- They don't compress easily, since the message is not constant.
|
||||
|
||||
(Unless you turn positional parameters into key-value pairs with numerical
|
||||
keys, at which point you've gotten key-value logging with meaningless
|
||||
keys.)
|
||||
|
||||
### Practical
|
||||
|
||||
#### Why key-value pairs, and not a map?
|
||||
|
||||
Key-value pairs are *much* easier to optimize, especially around
|
||||
allocations. Zap (a structured logger that inspired logr's interface) has
|
||||
[performance measurements](https://github.com/uber-go/zap#performance)
|
||||
that show this quite nicely.
|
||||
|
||||
While the interface ends up being a little less obvious, you get
|
||||
potentially better performance, plus avoid making users type
|
||||
`map[string]string{}` every time they want to log.
|
||||
|
||||
#### What if my V-levels differ between libraries?
|
||||
|
||||
That's fine. Control your V-levels on a per-logger basis, and use the
|
||||
`WithName` method to pass different loggers to different libraries.
|
||||
|
||||
Generally, you should take care to ensure that you have relatively
|
||||
consistent V-levels within a given logger, however, as this makes deciding
|
||||
on what verbosity of logs to request easier.
|
||||
|
||||
#### But I really want to use a format string!
|
||||
|
||||
That's not actually a question. Assuming your question is "how do
|
||||
I convert my mental model of logging with format strings to logging with
|
||||
constant messages":
|
||||
|
||||
1. Figure out what the error actually is, as you'd write in a TL;DR style,
|
||||
and use that as a message.
|
||||
|
||||
2. For every place you'd write a format specifier, look to the word before
|
||||
it, and add that as a key value pair.
|
||||
|
||||
For instance, consider the following examples (all taken from spots in the
|
||||
Kubernetes codebase):
|
||||
|
||||
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
|
||||
responseCode, err)` becomes `logger.Error(err, "client returned an
|
||||
error", "code", responseCode)`
|
||||
|
||||
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
|
||||
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
|
||||
response when requesting url", "attempt", retries, "after
|
||||
seconds", seconds, "url", url)`
|
||||
|
||||
If you *really* must use a format string, use it in a key's value, and
|
||||
call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
|
||||
reflect over type %T")` becomes `logger.Info("unable to reflect over
|
||||
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
|
||||
this is necessary should be few and far between.
|
||||
|
||||
#### How do I choose my V-levels?
|
||||
|
||||
This is basically the only hard constraint: increase V-levels to denote
|
||||
more verbose or more debug-y logs.
|
||||
|
||||
Otherwise, you can start out with `0` as "you always want to see this",
|
||||
`1` as "common logging that you might *possibly* want to turn off", and
|
||||
`10` as "I would like to performance-test your log collection stack."
|
||||
|
||||
Then gradually choose levels in between as you need them, working your way
|
||||
down from 10 (for debug and trace style logs) and up from 1 (for chattier
|
||||
info-type logs.)
|
||||
|
||||
#### How do I choose my keys?
|
||||
|
||||
Keys are fairly flexible, and can hold more or less any string
|
||||
value. For best compatibility with implementations and consistency
|
||||
with existing code in other projects, there are a few conventions you
|
||||
should consider.
|
||||
|
||||
- Make your keys human-readable.
|
||||
- Constant keys are generally a good idea.
|
||||
- Be consistent across your codebase.
|
||||
- Keys should naturally match parts of the message string.
|
||||
- Use lower case for simple keys and
|
||||
[lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
|
||||
more complex ones. Kubernetes is one example of a project that has
|
||||
[adopted that
|
||||
convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
|
||||
|
||||
While key names are mostly unrestricted (and spaces are acceptable),
|
||||
it's generally a good idea to stick to printable ascii characters, or at
|
||||
least match the general character set of your log lines.
|
||||
|
||||
#### Why should keys be constant values?
|
||||
|
||||
The point of structured logging is to make later log processing easier. Your
|
||||
keys are, effectively, the schema of each log message. If you use different
|
||||
keys across instances of the same log line, you will make your structured logs
|
||||
much harder to use. `Sprintf()` is for values, not for keys!
|
||||
|
||||
#### Why is this not a pure interface?
|
||||
|
||||
The Logger type is implemented as a struct in order to allow the Go compiler to
|
||||
optimize things like high-V `Info` logs that are not triggered. Not all of
|
||||
these implementations are implemented yet, but this structure was suggested as
|
||||
a way to ensure they *can* be implemented. All of the real work is behind the
|
||||
`LogSink` interface.
|
||||
|
||||
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
54
client/vendor/github.com/go-logr/logr/discard.go
generated
vendored
Normal file
54
client/vendor/github.com/go-logr/logr/discard.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
Copyright 2020 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
// Discard returns a Logger that discards all messages logged to it. It can be
|
||||
// used whenever the caller is not interested in the logs. Logger instances
|
||||
// produced by this function always compare as equal.
|
||||
func Discard() Logger {
|
||||
return Logger{
|
||||
level: 0,
|
||||
sink: discardLogSink{},
|
||||
}
|
||||
}
|
||||
|
||||
// discardLogSink is a LogSink that discards all messages.
|
||||
type discardLogSink struct{}
|
||||
|
||||
// Verify that it actually implements the interface
|
||||
var _ LogSink = discardLogSink{}
|
||||
|
||||
func (l discardLogSink) Init(RuntimeInfo) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) Enabled(int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (l discardLogSink) Info(int, string, ...interface{}) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) Error(error, string, ...interface{}) {
|
||||
}
|
||||
|
||||
func (l discardLogSink) WithValues(...interface{}) LogSink {
|
||||
return l
|
||||
}
|
||||
|
||||
func (l discardLogSink) WithName(string) LogSink {
|
||||
return l
|
||||
}
|
510
client/vendor/github.com/go-logr/logr/logr.go
generated
vendored
Normal file
510
client/vendor/github.com/go-logr/logr/logr.go
generated
vendored
Normal file
@@ -0,0 +1,510 @@
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This design derives from Dave Cheney's blog:
|
||||
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
||||
|
||||
// Package logr defines a general-purpose logging API and abstract interfaces
|
||||
// to back that API. Packages in the Go ecosystem can depend on this package,
|
||||
// while callers can implement logging with whatever backend is appropriate.
|
||||
//
|
||||
// Usage
|
||||
//
|
||||
// Logging is done using a Logger instance. Logger is a concrete type with
|
||||
// methods, which defers the actual logging to a LogSink interface. The main
|
||||
// methods of Logger are Info() and Error(). Arguments to Info() and Error()
|
||||
// are key/value pairs rather than printf-style formatted strings, emphasizing
|
||||
// "structured logging".
|
||||
//
|
||||
// With Go's standard log package, we might write:
|
||||
// log.Printf("setting target value %s", targetValue)
|
||||
//
|
||||
// With logr's structured logging, we'd write:
|
||||
// logger.Info("setting target", "value", targetValue)
|
||||
//
|
||||
// Errors are much the same. Instead of:
|
||||
// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
|
||||
//
|
||||
// We'd write:
|
||||
// logger.Error(err, "failed to open the pod bay door", "user", user)
|
||||
//
|
||||
// Info() and Error() are very similar, but they are separate methods so that
|
||||
// LogSink implementations can choose to do things like attach additional
|
||||
// information (such as stack traces) on calls to Error(). Error() messages are
|
||||
// always logged, regardless of the current verbosity. If there is no error
|
||||
// instance available, passing nil is valid.
|
||||
//
|
||||
// Verbosity
|
||||
//
|
||||
// Often we want to log information only when the application in "verbose
|
||||
// mode". To write log lines that are more verbose, Logger has a V() method.
|
||||
// The higher the V-level of a log line, the less critical it is considered.
|
||||
// Log-lines with V-levels that are not enabled (as per the LogSink) will not
|
||||
// be written. Level V(0) is the default, and logger.V(0).Info() has the same
|
||||
// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
|
||||
// Error messages do not have a verbosity level and are always logged.
|
||||
//
|
||||
// Where we might have written:
|
||||
// if flVerbose >= 2 {
|
||||
// log.Printf("an unusual thing happened")
|
||||
// }
|
||||
//
|
||||
// We can write:
|
||||
// logger.V(2).Info("an unusual thing happened")
|
||||
//
|
||||
// Logger Names
|
||||
//
|
||||
// Logger instances can have name strings so that all messages logged through
|
||||
// that instance have additional context. For example, you might want to add
|
||||
// a subsystem name:
|
||||
//
|
||||
// logger.WithName("compactor").Info("started", "time", time.Now())
|
||||
//
|
||||
// The WithName() method returns a new Logger, which can be passed to
|
||||
// constructors or other functions for further use. Repeated use of WithName()
|
||||
// will accumulate name "segments". These name segments will be joined in some
|
||||
// way by the LogSink implementation. It is strongly recommended that name
|
||||
// segments contain simple identifiers (letters, digits, and hyphen), and do
|
||||
// not contain characters that could muddle the log output or confuse the
|
||||
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
|
||||
// quotes, etc).
|
||||
//
|
||||
// Saved Values
|
||||
//
|
||||
// Logger instances can store any number of key/value pairs, which will be
|
||||
// logged alongside all messages logged through that instance. For example,
|
||||
// you might want to create a Logger instance per managed object:
|
||||
//
|
||||
// With the standard log package, we might write:
|
||||
// log.Printf("decided to set field foo to value %q for object %s/%s",
|
||||
// targetValue, object.Namespace, object.Name)
|
||||
//
|
||||
// With logr we'd write:
|
||||
// // Elsewhere: set up the logger to log the object name.
|
||||
// obj.logger = mainLogger.WithValues(
|
||||
// "name", obj.name, "namespace", obj.namespace)
|
||||
//
|
||||
// // later on...
|
||||
// obj.logger.Info("setting foo", "value", targetValue)
|
||||
//
|
||||
// Best Practices
|
||||
//
|
||||
// Logger has very few hard rules, with the goal that LogSink implementations
|
||||
// might have a lot of freedom to differentiate. There are, however, some
|
||||
// things to consider.
|
||||
//
|
||||
// The log message consists of a constant message attached to the log line.
|
||||
// This should generally be a simple description of what's occurring, and should
|
||||
// never be a format string. Variable information can then be attached using
|
||||
// named values.
|
||||
//
|
||||
// Keys are arbitrary strings, but should generally be constant values. Values
|
||||
// may be any Go value, but how the value is formatted is determined by the
|
||||
// LogSink implementation.
|
||||
//
|
||||
// Logger instances are meant to be passed around by value. Code that receives
|
||||
// such a value can call its methods without having to check whether the
|
||||
// instance is ready for use.
|
||||
//
|
||||
// Calling methods with the null logger (Logger{}) as instance will crash
|
||||
// because it has no LogSink. Therefore this null logger should never be passed
|
||||
// around. For cases where passing a logger is optional, a pointer to Logger
|
||||
// should be used.
|
||||
//
|
||||
// Key Naming Conventions
|
||||
//
|
||||
// Keys are not strictly required to conform to any specification or regex, but
|
||||
// it is recommended that they:
|
||||
// * be human-readable and meaningful (not auto-generated or simple ordinals)
|
||||
// * be constant (not dependent on input data)
|
||||
// * contain only printable characters
|
||||
// * not contain whitespace or punctuation
|
||||
// * use lower case for simple keys and lowerCamelCase for more complex ones
|
||||
//
|
||||
// These guidelines help ensure that log data is processed properly regardless
|
||||
// of the log implementation. For example, log implementations will try to
|
||||
// output JSON data or will store data for later database (e.g. SQL) queries.
|
||||
//
|
||||
// While users are generally free to use key names of their choice, it's
|
||||
// generally best to avoid using the following keys, as they're frequently used
|
||||
// by implementations:
|
||||
// * "caller": the calling information (file/line) of a particular log line
|
||||
// * "error": the underlying error value in the `Error` method
|
||||
// * "level": the log level
|
||||
// * "logger": the name of the associated logger
|
||||
// * "msg": the log message
|
||||
// * "stacktrace": the stack trace associated with a particular log line or
|
||||
// error (often from the `Error` message)
|
||||
// * "ts": the timestamp for a log line
|
||||
//
|
||||
// Implementations are encouraged to make use of these keys to represent the
|
||||
// above concepts, when necessary (for example, in a pure-JSON output form, it
|
||||
// would be necessary to represent at least message and timestamp as ordinary
|
||||
// named values).
|
||||
//
|
||||
// Break Glass
|
||||
//
|
||||
// Implementations may choose to give callers access to the underlying
|
||||
// logging implementation. The recommended pattern for this is:
|
||||
// // Underlier exposes access to the underlying logging implementation.
|
||||
// // Since callers only have a logr.Logger, they have to know which
|
||||
// // implementation is in use, so this interface is less of an abstraction
|
||||
// // and more of way to test type conversion.
|
||||
// type Underlier interface {
|
||||
// GetUnderlying() <underlying-type>
|
||||
// }
|
||||
//
|
||||
// Logger grants access to the sink to enable type assertions like this:
|
||||
// func DoSomethingWithImpl(log logr.Logger) {
|
||||
// if underlier, ok := log.GetSink()(impl.Underlier) {
|
||||
// implLogger := underlier.GetUnderlying()
|
||||
// ...
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Custom `With*` functions can be implemented by copying the complete
|
||||
// Logger struct and replacing the sink in the copy:
|
||||
// // WithFooBar changes the foobar parameter in the log sink and returns a
|
||||
// // new logger with that modified sink. It does nothing for loggers where
|
||||
// // the sink doesn't support that parameter.
|
||||
// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
|
||||
// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
|
||||
// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
|
||||
// }
|
||||
// return log
|
||||
// }
|
||||
//
|
||||
// Don't use New to construct a new Logger with a LogSink retrieved from an
|
||||
// existing Logger. Source code attribution might not work correctly and
|
||||
// unexported fields in Logger get lost.
|
||||
//
|
||||
// Beware that the same LogSink instance may be shared by different logger
|
||||
// instances. Calling functions that modify the LogSink will affect all of
|
||||
// those.
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// New returns a new Logger instance. This is primarily used by libraries
|
||||
// implementing LogSink, rather than end users.
|
||||
func New(sink LogSink) Logger {
|
||||
logger := Logger{}
|
||||
logger.setSink(sink)
|
||||
sink.Init(runtimeInfo)
|
||||
return logger
|
||||
}
|
||||
|
||||
// setSink stores the sink and updates any related fields. It mutates the
|
||||
// logger and thus is only safe to use for loggers that are not currently being
|
||||
// used concurrently.
|
||||
func (l *Logger) setSink(sink LogSink) {
|
||||
l.sink = sink
|
||||
}
|
||||
|
||||
// GetSink returns the stored sink.
|
||||
func (l Logger) GetSink() LogSink {
|
||||
return l.sink
|
||||
}
|
||||
|
||||
// WithSink returns a copy of the logger with the new sink.
|
||||
func (l Logger) WithSink(sink LogSink) Logger {
|
||||
l.setSink(sink)
|
||||
return l
|
||||
}
|
||||
|
||||
// Logger is an interface to an abstract logging implementation. This is a
|
||||
// concrete type for performance reasons, but all the real work is passed on to
|
||||
// a LogSink. Implementations of LogSink should provide their own constructors
|
||||
// that return Logger, not LogSink.
|
||||
//
|
||||
// The underlying sink can be accessed through GetSink and be modified through
|
||||
// WithSink. This enables the implementation of custom extensions (see "Break
|
||||
// Glass" in the package documentation). Normally the sink should be used only
|
||||
// indirectly.
|
||||
type Logger struct {
|
||||
sink LogSink
|
||||
level int
|
||||
}
|
||||
|
||||
// Enabled tests whether this Logger is enabled. For example, commandline
|
||||
// flags might be used to set the logging verbosity and disable some info logs.
|
||||
func (l Logger) Enabled() bool {
|
||||
return l.sink.Enabled(l.level)
|
||||
}
|
||||
|
||||
// Info logs a non-error message with the given key/value pairs as context.
|
||||
//
|
||||
// The msg argument should be used to add some constant description to the log
|
||||
// line. The key/value pairs can then be used to add additional variable
|
||||
// information. The key/value pairs must alternate string keys and arbitrary
|
||||
// values.
|
||||
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
|
||||
if l.Enabled() {
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
withHelper.GetCallStackHelper()()
|
||||
}
|
||||
l.sink.Info(l.level, msg, keysAndValues...)
|
||||
}
|
||||
}
|
||||
|
||||
// Error logs an error, with the given message and key/value pairs as context.
|
||||
// It functions similarly to Info, but may have unique behavior, and should be
|
||||
// preferred for logging errors (see the package documentations for more
|
||||
// information). The log message will always be emitted, regardless of
|
||||
// verbosity level.
|
||||
//
|
||||
// The msg argument should be used to add context to any underlying error,
|
||||
// while the err argument should be used to attach the actual error that
|
||||
// triggered this log line, if present. The err parameter is optional
|
||||
// and nil may be passed instead of an error instance.
|
||||
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
withHelper.GetCallStackHelper()()
|
||||
}
|
||||
l.sink.Error(err, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// V returns a new Logger instance for a specific verbosity level, relative to
|
||||
// this Logger. In other words, V-levels are additive. A higher verbosity
|
||||
// level means a log message is less important. Negative V-levels are treated
|
||||
// as 0.
|
||||
func (l Logger) V(level int) Logger {
|
||||
if level < 0 {
|
||||
level = 0
|
||||
}
|
||||
l.level += level
|
||||
return l
|
||||
}
|
||||
|
||||
// WithValues returns a new Logger instance with additional key/value pairs.
|
||||
// See Info for documentation on how key/value pairs work.
|
||||
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
|
||||
l.setSink(l.sink.WithValues(keysAndValues...))
|
||||
return l
|
||||
}
|
||||
|
||||
// WithName returns a new Logger instance with the specified name element added
|
||||
// to the Logger's name. Successive calls with WithName append additional
|
||||
// suffixes to the Logger's name. It's strongly recommended that name segments
|
||||
// contain only letters, digits, and hyphens (see the package documentation for
|
||||
// more information).
|
||||
func (l Logger) WithName(name string) Logger {
|
||||
l.setSink(l.sink.WithName(name))
|
||||
return l
|
||||
}
|
||||
|
||||
// WithCallDepth returns a Logger instance that offsets the call stack by the
|
||||
// specified number of frames when logging call site information, if possible.
|
||||
// This is useful for users who have helper functions between the "real" call
|
||||
// site and the actual calls to Logger methods. If depth is 0 the attribution
|
||||
// should be to the direct caller of this function. If depth is 1 the
|
||||
// attribution should skip 1 call frame, and so on. Successive calls to this
|
||||
// are additive.
|
||||
//
|
||||
// If the underlying log implementation supports a WithCallDepth(int) method,
|
||||
// it will be called and the result returned. If the implementation does not
|
||||
// support CallDepthLogSink, the original Logger will be returned.
|
||||
//
|
||||
// To skip one level, WithCallStackHelper() should be used instead of
|
||||
// WithCallDepth(1) because it works with implementions that support the
|
||||
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
|
||||
func (l Logger) WithCallDepth(depth int) Logger {
|
||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
||||
l.setSink(withCallDepth.WithCallDepth(depth))
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// WithCallStackHelper returns a new Logger instance that skips the direct
|
||||
// caller when logging call site information, if possible. This is useful for
|
||||
// users who have helper functions between the "real" call site and the actual
|
||||
// calls to Logger methods and want to support loggers which depend on marking
|
||||
// each individual helper function, like loggers based on testing.T.
|
||||
//
|
||||
// In addition to using that new logger instance, callers also must call the
|
||||
// returned function.
|
||||
//
|
||||
// If the underlying log implementation supports a WithCallDepth(int) method,
|
||||
// WithCallDepth(1) will be called to produce a new logger. If it supports a
|
||||
// WithCallStackHelper() method, that will be also called. If the
|
||||
// implementation does not support either of these, the original Logger will be
|
||||
// returned.
|
||||
func (l Logger) WithCallStackHelper() (func(), Logger) {
|
||||
var helper func()
|
||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
||||
l.setSink(withCallDepth.WithCallDepth(1))
|
||||
}
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
helper = withHelper.GetCallStackHelper()
|
||||
} else {
|
||||
helper = func() {}
|
||||
}
|
||||
return helper, l
|
||||
}
|
||||
|
||||
// contextKey is how we find Loggers in a context.Context.
|
||||
type contextKey struct{}
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
// notFoundError exists to carry an IsNotFound method.
|
||||
type notFoundError struct{}
|
||||
|
||||
func (notFoundError) Error() string {
|
||||
return "no logr.Logger was present"
|
||||
}
|
||||
|
||||
func (notFoundError) IsNotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
|
||||
// RuntimeInfo holds information that the logr "core" library knows which
|
||||
// LogSinks might want to know.
|
||||
type RuntimeInfo struct {
|
||||
// CallDepth is the number of call frames the logr library adds between the
|
||||
// end-user and the LogSink. LogSink implementations which choose to print
|
||||
// the original logging site (e.g. file & line) should climb this many
|
||||
// additional frames to find it.
|
||||
CallDepth int
|
||||
}
|
||||
|
||||
// runtimeInfo is a static global. It must not be changed at run time.
|
||||
var runtimeInfo = RuntimeInfo{
|
||||
CallDepth: 1,
|
||||
}
|
||||
|
||||
// LogSink represents a logging implementation. End-users will generally not
|
||||
// interact with this type.
|
||||
type LogSink interface {
|
||||
// Init receives optional information about the logr library for LogSink
|
||||
// implementations that need it.
|
||||
Init(info RuntimeInfo)
|
||||
|
||||
// Enabled tests whether this LogSink is enabled at the specified V-level.
|
||||
// For example, commandline flags might be used to set the logging
|
||||
// verbosity and disable some info logs.
|
||||
Enabled(level int) bool
|
||||
|
||||
// Info logs a non-error message with the given key/value pairs as context.
|
||||
// The level argument is provided for optional logging. This method will
|
||||
// only be called when Enabled(level) is true. See Logger.Info for more
|
||||
// details.
|
||||
Info(level int, msg string, keysAndValues ...interface{})
|
||||
|
||||
// Error logs an error, with the given message and key/value pairs as
|
||||
// context. See Logger.Error for more details.
|
||||
Error(err error, msg string, keysAndValues ...interface{})
|
||||
|
||||
// WithValues returns a new LogSink with additional key/value pairs. See
|
||||
// Logger.WithValues for more details.
|
||||
WithValues(keysAndValues ...interface{}) LogSink
|
||||
|
||||
// WithName returns a new LogSink with the specified name appended. See
|
||||
// Logger.WithName for more details.
|
||||
WithName(name string) LogSink
|
||||
}
|
||||
|
||||
// CallDepthLogSink represents a Logger that knows how to climb the call stack
|
||||
// to identify the original call site and can offset the depth by a specified
|
||||
// number of frames. This is useful for users who have helper functions
|
||||
// between the "real" call site and the actual calls to Logger methods.
|
||||
// Implementations that log information about the call site (such as file,
|
||||
// function, or line) would otherwise log information about the intermediate
|
||||
// helper functions.
|
||||
//
|
||||
// This is an optional interface and implementations are not required to
|
||||
// support it.
|
||||
type CallDepthLogSink interface {
|
||||
// WithCallDepth returns a LogSink that will offset the call
|
||||
// stack by the specified number of frames when logging call
|
||||
// site information.
|
||||
//
|
||||
// If depth is 0, the LogSink should skip exactly the number
|
||||
// of call frames defined in RuntimeInfo.CallDepth when Info
|
||||
// or Error are called, i.e. the attribution should be to the
|
||||
// direct caller of Logger.Info or Logger.Error.
|
||||
//
|
||||
// If depth is 1 the attribution should skip 1 call frame, and so on.
|
||||
// Successive calls to this are additive.
|
||||
WithCallDepth(depth int) LogSink
|
||||
}
|
||||
|
||||
// CallStackHelperLogSink represents a Logger that knows how to climb
|
||||
// the call stack to identify the original call site and can skip
|
||||
// intermediate helper functions if they mark themselves as
|
||||
// helper. Go's testing package uses that approach.
|
||||
//
|
||||
// This is useful for users who have helper functions between the
|
||||
// "real" call site and the actual calls to Logger methods.
|
||||
// Implementations that log information about the call site (such as
|
||||
// file, function, or line) would otherwise log information about the
|
||||
// intermediate helper functions.
|
||||
//
|
||||
// This is an optional interface and implementations are not required
|
||||
// to support it. Implementations that choose to support this must not
|
||||
// simply implement it as WithCallDepth(1), because
|
||||
// Logger.WithCallStackHelper will call both methods if they are
|
||||
// present. This should only be implemented for LogSinks that actually
|
||||
// need it, as with testing.T.
|
||||
type CallStackHelperLogSink interface {
|
||||
// GetCallStackHelper returns a function that must be called
|
||||
// to mark the direct caller as helper function when logging
|
||||
// call site information.
|
||||
GetCallStackHelper() func()
|
||||
}
|
||||
|
||||
// Marshaler is an optional interface that logged values may choose to
|
||||
// implement. Loggers with structured output, such as JSON, should
|
||||
// log the object return by the MarshalLog method instead of the
|
||||
// original value.
|
||||
type Marshaler interface {
|
||||
// MarshalLog can be used to:
|
||||
// - ensure that structs are not logged as strings when the original
|
||||
// value has a String method: return a different type without a
|
||||
// String method
|
||||
// - select which fields of a complex type should get logged:
|
||||
// return a simpler struct with fewer fields
|
||||
// - log unexported fields: return a different struct
|
||||
// with exported fields
|
||||
//
|
||||
// It may return any value of any type.
|
||||
MarshalLog() interface{}
|
||||
}
|
26
client/vendor/github.com/go-openapi/jsonpointer/.editorconfig
generated
vendored
Normal file
26
client/vendor/github.com/go-openapi/jsonpointer/.editorconfig
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
# top-most EditorConfig file
|
||||
root = true
|
||||
|
||||
# Unix-style newlines with a newline ending every file
|
||||
[*]
|
||||
end_of_line = lf
|
||||
insert_final_newline = true
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
trim_trailing_whitespace = true
|
||||
|
||||
# Set default charset
|
||||
[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
|
||||
charset = utf-8
|
||||
|
||||
# Tab indentation (no size specified)
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
|
||||
[*.md]
|
||||
trim_trailing_whitespace = false
|
||||
|
||||
# Matches the exact files either package.json or .travis.yml
|
||||
[{package.json,.travis.yml}]
|
||||
indent_style = space
|
||||
indent_size = 2
|
1
client/vendor/github.com/go-openapi/jsonpointer/.gitignore
generated
vendored
Normal file
1
client/vendor/github.com/go-openapi/jsonpointer/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
secrets.yml
|
15
client/vendor/github.com/go-openapi/jsonpointer/.travis.yml
generated
vendored
Normal file
15
client/vendor/github.com/go-openapi/jsonpointer/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
go:
|
||||
- 1.14.x
|
||||
- 1.15.x
|
||||
install:
|
||||
- GO111MODULE=off go get -u gotest.tools/gotestsum
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
language: go
|
||||
notifications:
|
||||
slack:
|
||||
secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw=
|
||||
script:
|
||||
- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
|
74
client/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
generated
vendored
Normal file
74
client/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
202 client/vendor/github.com/go-openapi/jsonpointer/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
15 client/vendor/github.com/go-openapi/jsonpointer/README.md generated vendored Normal file
@@ -0,0 +1,15 @@
# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language

## Status
Completed YES

Tested YES

## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

### Note
The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.
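For orientation only (this is not part of the vendored diff): a minimal sketch of how the jsonpointer package added above is typically used against a decoded JSON document. The document literal and the pointer string are assumptions chosen for illustration, not taken from this repository.

// Illustrative sketch, not part of the vendored files.
package main

import (
    "fmt"

    "github.com/go-openapi/jsonpointer"
)

func main() {
    // A plain map/slice document, as produced by encoding/json into interface{}.
    doc := map[string]interface{}{
        "volumes": []interface{}{
            map[string]interface{}{"name": "snap-0"},
        },
    }

    // "/volumes/0/name" walks the map key, the slice index, then the nested key.
    p, err := jsonpointer.New("/volumes/0/name")
    if err != nil {
        panic(err)
    }

    val, kind, err := p.Get(doc)
    if err != nil {
        panic(err)
    }
    fmt.Println(val, kind) // snap-0 string
}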
390 client/vendor/github.com/go-openapi/jsonpointer/pointer.go generated vendored Normal file
@@ -0,0 +1,390 @@
// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           sigu-399
// author-github    https://github.com/sigu-399
// author-mail      sigu.399@gmail.com
//
// repository-name  jsonpointer
// repository-desc  An implementation of JSON Pointer - Go language
//
// description      Main and unique file.
//
// created          25-02-2013

package jsonpointer

import (
    "errors"
    "fmt"
    "reflect"
    "strconv"
    "strings"

    "github.com/go-openapi/swag"
)

const (
    emptyPointer     = ``
    pointerSeparator = `/`

    invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
)

var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()

// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
    JSONLookup(string) (interface{}, error)
}

// JSONSetable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONSetable interface {
    JSONSet(string, interface{}) error
}

// New creates a new json pointer for the given string
func New(jsonPointerString string) (Pointer, error) {

    var p Pointer
    err := p.parse(jsonPointerString)
    return p, err

}

// Pointer the json pointer reprsentation
type Pointer struct {
    referenceTokens []string
}

// "Constructor", parses the given string JSON pointer
func (p *Pointer) parse(jsonPointerString string) error {

    var err error

    if jsonPointerString != emptyPointer {
        if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
            err = errors.New(invalidStart)
        } else {
            referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
            for _, referenceToken := range referenceTokens[1:] {
                p.referenceTokens = append(p.referenceTokens, referenceToken)
            }
        }
    }

    return err
}

// Get uses the pointer to retrieve a value from a JSON document
func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
    return p.get(document, swag.DefaultJSONNameProvider)
}

// Set uses the pointer to set a value from a JSON document
func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
    return document, p.set(document, value, swag.DefaultJSONNameProvider)
}

// GetForToken gets a value for a json pointer token 1 level deep
func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
    return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}

// SetForToken gets a value for a json pointer token 1 level deep
func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
    return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}

func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
    rValue := reflect.Indirect(reflect.ValueOf(node))
    kind := rValue.Kind()

    if rValue.Type().Implements(jsonPointableType) {
        r, err := node.(JSONPointable).JSONLookup(decodedToken)
        if err != nil {
            return nil, kind, err
        }
        return r, kind, nil
    }

    switch kind {
    case reflect.Struct:
        nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
        if !ok {
            return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
        }
        fld := rValue.FieldByName(nm)
        return fld.Interface(), kind, nil

    case reflect.Map:
        kv := reflect.ValueOf(decodedToken)
        mv := rValue.MapIndex(kv)

        if mv.IsValid() {
            return mv.Interface(), kind, nil
        }
        return nil, kind, fmt.Errorf("object has no key %q", decodedToken)

    case reflect.Slice:
        tokenIndex, err := strconv.Atoi(decodedToken)
        if err != nil {
            return nil, kind, err
        }
        sLength := rValue.Len()
        if tokenIndex < 0 || tokenIndex >= sLength {
            return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
        }

        elem := rValue.Index(tokenIndex)
        return elem.Interface(), kind, nil

    default:
        return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
    }

}

func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
    rValue := reflect.Indirect(reflect.ValueOf(node))

    if ns, ok := node.(JSONSetable); ok { // pointer impl
        return ns.JSONSet(decodedToken, data)
    }

    if rValue.Type().Implements(jsonSetableType) {
        return node.(JSONSetable).JSONSet(decodedToken, data)
    }

    switch rValue.Kind() {
    case reflect.Struct:
        nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
        if !ok {
            return fmt.Errorf("object has no field %q", decodedToken)
        }
        fld := rValue.FieldByName(nm)
        if fld.IsValid() {
            fld.Set(reflect.ValueOf(data))
        }
        return nil

    case reflect.Map:
        kv := reflect.ValueOf(decodedToken)
        rValue.SetMapIndex(kv, reflect.ValueOf(data))
        return nil

    case reflect.Slice:
        tokenIndex, err := strconv.Atoi(decodedToken)
        if err != nil {
            return err
        }
        sLength := rValue.Len()
        if tokenIndex < 0 || tokenIndex >= sLength {
            return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
        }

        elem := rValue.Index(tokenIndex)
        if !elem.CanSet() {
            return fmt.Errorf("can't set slice index %s to %v", decodedToken, data)
        }
        elem.Set(reflect.ValueOf(data))
        return nil

    default:
        return fmt.Errorf("invalid token reference %q", decodedToken)
    }

}

func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {

    if nameProvider == nil {
        nameProvider = swag.DefaultJSONNameProvider
    }

    kind := reflect.Invalid

    // Full document when empty
    if len(p.referenceTokens) == 0 {
        return node, kind, nil
    }

    for _, token := range p.referenceTokens {

        decodedToken := Unescape(token)

        r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
        if err != nil {
            return nil, knd, err
        }
        node, kind = r, knd

    }

    rValue := reflect.ValueOf(node)
    kind = rValue.Kind()

    return node, kind, nil
}

func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
    knd := reflect.ValueOf(node).Kind()

    if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
        return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
    }

    if nameProvider == nil {
        nameProvider = swag.DefaultJSONNameProvider
    }

    // Full document when empty
    if len(p.referenceTokens) == 0 {
        return nil
    }

    lastI := len(p.referenceTokens) - 1
    for i, token := range p.referenceTokens {
        isLastToken := i == lastI
        decodedToken := Unescape(token)

        if isLastToken {

            return setSingleImpl(node, data, decodedToken, nameProvider)
        }

        rValue := reflect.Indirect(reflect.ValueOf(node))
        kind := rValue.Kind()

        if rValue.Type().Implements(jsonPointableType) {
            r, err := node.(JSONPointable).JSONLookup(decodedToken)
            if err != nil {
                return err
            }
            fld := reflect.ValueOf(r)
            if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
                node = fld.Addr().Interface()
                continue
            }
            node = r
            continue
        }

        switch kind {
        case reflect.Struct:
            nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
            if !ok {
                return fmt.Errorf("object has no field %q", decodedToken)
            }
            fld := rValue.FieldByName(nm)
            if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
                node = fld.Addr().Interface()
                continue
            }
            node = fld.Interface()

        case reflect.Map:
            kv := reflect.ValueOf(decodedToken)
            mv := rValue.MapIndex(kv)

            if !mv.IsValid() {
                return fmt.Errorf("object has no key %q", decodedToken)
            }
            if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
                node = mv.Addr().Interface()
                continue
            }
            node = mv.Interface()

        case reflect.Slice:
            tokenIndex, err := strconv.Atoi(decodedToken)
            if err != nil {
                return err
            }
            sLength := rValue.Len()
            if tokenIndex < 0 || tokenIndex >= sLength {
                return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
            }

            elem := rValue.Index(tokenIndex)
            if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr {
                node = elem.Addr().Interface()
                continue
            }
            node = elem.Interface()

        default:
            return fmt.Errorf("invalid token reference %q", decodedToken)
        }

    }

    return nil
}

// DecodedTokens returns the decoded tokens
func (p *Pointer) DecodedTokens() []string {
    result := make([]string, 0, len(p.referenceTokens))
    for _, t := range p.referenceTokens {
        result = append(result, Unescape(t))
    }
    return result
}

// IsEmpty returns true if this is an empty json pointer
// this indicates that it points to the root document
func (p *Pointer) IsEmpty() bool {
    return len(p.referenceTokens) == 0
}

// Pointer to string representation function
func (p *Pointer) String() string {

    if len(p.referenceTokens) == 0 {
        return emptyPointer
    }

    pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator)

    return pointerString
}

// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
// ... and vice versa

const (
    encRefTok0 = `~0`
    encRefTok1 = `~1`
    decRefTok0 = `~`
    decRefTok1 = `/`
)

// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
    step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
    step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
    return step2
}

// Escape escapes a pointer reference token string
func Escape(token string) string {
    step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
    step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
    return step2
}
1 client/vendor/github.com/go-openapi/jsonreference/.gitignore generated vendored Normal file
@@ -0,0 +1 @@
secrets.yml
41 client/vendor/github.com/go-openapi/jsonreference/.golangci.yml generated vendored Normal file
@@ -0,0 +1,41 @@
linters-settings:
  govet:
    check-shadowing: true
  golint:
    min-confidence: 0
  gocyclo:
    min-complexity: 30
  maligned:
    suggest-new: true
  dupl:
    threshold: 100
  goconst:
    min-len: 2
    min-occurrences: 4
linters:
  enable-all: true
  disable:
    - maligned
    - lll
    - gochecknoglobals
    - godox
    - gocognit
    - whitespace
    - wsl
    - funlen
    - gochecknoglobals
    - gochecknoinits
    - scopelint
    - wrapcheck
    - exhaustivestruct
    - exhaustive
    - nlreturn
    - testpackage
    - gci
    - gofumpt
    - goerr113
    - gomnd
    - tparallel
    - nestif
    - godot
    - errorlint
24 client/vendor/github.com/go-openapi/jsonreference/.travis.yml generated vendored Normal file
@@ -0,0 +1,24 @@
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- 1.14.x
- 1.x
install:
- go get gotest.tools/gotestsum
jobs:
  include:
  # include linting job, but only for latest go version and amd64 arch
  - go: 1.x
    arch: amd64
    install:
      go get github.com/golangci/golangci-lint/cmd/golangci-lint
    script:
    - golangci-lint run --new-from-rev master
env:
- GO111MODULE=on
language: go
notifications:
  slack:
    secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ=
script:
- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
74 client/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md generated vendored Normal file
@@ -0,0 +1,74 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
202 client/vendor/github.com/go-openapi/jsonreference/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
15 client/vendor/github.com/go-openapi/jsonreference/README.md generated vendored Normal file
@@ -0,0 +1,15 @@
# gojsonreference [](https://travis-ci.org/go-openapi/jsonreference) [](https://codecov.io/gh/go-openapi/jsonreference) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonreference)
An implementation of JSON Reference - Go language

## Status
Feature complete. Stable API

## Dependencies
https://github.com/go-openapi/jsonpointer

## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
63 client/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go generated vendored Normal file
@@ -0,0 +1,63 @@
package internal

import (
    "net/url"
    "regexp"
    "strings"
)

const (
    defaultHttpPort  = ":80"
    defaultHttpsPort = ":443"
)

// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)

// NormalizeURL will normalize the specified URL
// This was added to replace a previous call to the no longer maintained purell library:
// The call that was used looked like the following:
//    url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
//
// To explain all that was included in the call above, purell.FlagsSafe was really just the following:
//   - FlagLowercaseScheme
//   - FlagLowercaseHost
//   - FlagRemoveDefaultPort
//   - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
func NormalizeURL(u *url.URL) {
    lowercaseScheme(u)
    lowercaseHost(u)
    removeDefaultPort(u)
    removeDuplicateSlashes(u)
}

func lowercaseScheme(u *url.URL) {
    if len(u.Scheme) > 0 {
        u.Scheme = strings.ToLower(u.Scheme)
    }
}

func lowercaseHost(u *url.URL) {
    if len(u.Host) > 0 {
        u.Host = strings.ToLower(u.Host)
    }
}

func removeDefaultPort(u *url.URL) {
    if len(u.Host) > 0 {
        scheme := strings.ToLower(u.Scheme)
        u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
            if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
                return ""
            }
            return val
        })
    }
}

func removeDuplicateSlashes(u *url.URL) {
    if len(u.Path) > 0 {
        u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
    }
}
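For orientation only (not part of the vendored diff): internal.NormalizeURL above lives in an internal package and cannot be imported from outside this module, so the standalone sketch below merely mirrors the same normalization steps on a net/url.URL; the example URL is an assumption for illustration.

// Illustrative sketch mirroring the normalization steps; not part of the vendored files.
package main

import (
    "fmt"
    "net/url"
    "strings"
)

func main() {
    u, _ := url.Parse("HTTP://Example.COM:80//spec//swagger.json")

    // Same effect as the vendored helper: lowercase scheme and host,
    // drop the default port, collapse duplicate slashes in the path.
    u.Scheme = strings.ToLower(u.Scheme)
    u.Host = strings.ToLower(u.Host)
    u.Host = strings.TrimSuffix(u.Host, ":80")
    u.Path = strings.ReplaceAll(u.Path, "//", "/")

    fmt.Println(u.String()) // http://example.com/spec/swagger.json
}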
158 client/vendor/github.com/go-openapi/jsonreference/reference.go generated vendored Normal file
@@ -0,0 +1,158 @@
// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           sigu-399
// author-github    https://github.com/sigu-399
// author-mail      sigu.399@gmail.com
//
// repository-name  jsonreference
// repository-desc  An implementation of JSON Reference - Go language
//
// description      Main and unique file.
//
// created          26-02-2013

package jsonreference

import (
    "errors"
    "net/url"
    "strings"

    "github.com/go-openapi/jsonpointer"
    "github.com/go-openapi/jsonreference/internal"
)

const (
    fragmentRune = `#`
)

// New creates a new reference for the given string
func New(jsonReferenceString string) (Ref, error) {

    var r Ref
    err := r.parse(jsonReferenceString)
    return r, err

}

// MustCreateRef parses the ref string and panics when it's invalid.
// Use the New method for a version that returns an error
func MustCreateRef(ref string) Ref {
    r, err := New(ref)
    if err != nil {
        panic(err)
    }
    return r
}

// Ref represents a json reference object
type Ref struct {
    referenceURL     *url.URL
    referencePointer jsonpointer.Pointer

    HasFullURL      bool
    HasURLPathOnly  bool
    HasFragmentOnly bool
    HasFileScheme   bool
    HasFullFilePath bool
}

// GetURL gets the URL for this reference
func (r *Ref) GetURL() *url.URL {
    return r.referenceURL
}

// GetPointer gets the json pointer for this reference
func (r *Ref) GetPointer() *jsonpointer.Pointer {
    return &r.referencePointer
}

// String returns the best version of the url for this reference
func (r *Ref) String() string {

    if r.referenceURL != nil {
        return r.referenceURL.String()
    }

    if r.HasFragmentOnly {
        return fragmentRune + r.referencePointer.String()
    }

    return r.referencePointer.String()
}

// IsRoot returns true if this reference is a root document
func (r *Ref) IsRoot() bool {
    return r.referenceURL != nil &&
        !r.IsCanonical() &&
        !r.HasURLPathOnly &&
        r.referenceURL.Fragment == ""
}

// IsCanonical returns true when this pointer starts with http(s):// or file://
func (r *Ref) IsCanonical() bool {
    return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL)
}

// "Constructor", parses the given string JSON reference
func (r *Ref) parse(jsonReferenceString string) error {

    parsed, err := url.Parse(jsonReferenceString)
    if err != nil {
        return err
    }

    internal.NormalizeURL(parsed)

    r.referenceURL = parsed
    refURL := r.referenceURL

    if refURL.Scheme != "" && refURL.Host != "" {
        r.HasFullURL = true
    } else {
        if refURL.Path != "" {
            r.HasURLPathOnly = true
        } else if refURL.RawQuery == "" && refURL.Fragment != "" {
            r.HasFragmentOnly = true
        }
    }

    r.HasFileScheme = refURL.Scheme == "file"
    r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/")

    // invalid json-pointer error means url has no json-pointer fragment. simply ignore error
    r.referencePointer, _ = jsonpointer.New(refURL.Fragment)

    return nil
}

// Inherits creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned
func (r *Ref) Inherits(child Ref) (*Ref, error) {
    childURL := child.GetURL()
    parentURL := r.GetURL()
    if childURL == nil {
        return nil, errors.New("child url is nil")
    }
    if parentURL == nil {
        return &child, nil
    }

    ref, err := New(parentURL.ResolveReference(childURL).String())
    if err != nil {
        return nil, err
    }
    return &ref, nil
}
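For orientation only (not part of the vendored diff): a minimal sketch of how the jsonreference package added above is typically used; the schema URL is an assumed placeholder.

// Illustrative sketch, not part of the vendored files.
package main

import (
    "fmt"

    "github.com/go-openapi/jsonreference"
)

func main() {
    ref, err := jsonreference.New("http://example.com/schemas/snapshot.json#/definitions/source")
    if err != nil {
        panic(err)
    }

    fmt.Println(ref.IsCanonical())         // true: full http URL
    fmt.Println(ref.GetPointer().String()) // /definitions/source
    fmt.Println(ref.GetURL().Host)         // example.com
}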
26 client/vendor/github.com/go-openapi/swag/.editorconfig generated vendored Normal file
@@ -0,0 +1,26 @@
# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
indent_style = space
indent_size = 2
trim_trailing_whitespace = true

# Set default charset
[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
charset = utf-8

# Tab indentation (no size specified)
[*.go]
indent_style = tab

[*.md]
trim_trailing_whitespace = false

# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2
2 client/vendor/github.com/go-openapi/swag/.gitattributes generated vendored Normal file
@@ -0,0 +1,2 @@
# gofmt always uses LF, whereas Git uses CRLF on Windows.
*.go text eol=lf
4 client/vendor/github.com/go-openapi/swag/.gitignore generated vendored Normal file
@@ -0,0 +1,4 @@
secrets.yml
vendor
Godeps
.idea
54 client/vendor/github.com/go-openapi/swag/.golangci.yml generated vendored Normal file
@@ -0,0 +1,54 @@
linters-settings:
  govet:
    check-shadowing: true
  golint:
    min-confidence: 0
  gocyclo:
    min-complexity: 25
  maligned:
    suggest-new: true
  dupl:
    threshold: 100
  goconst:
    min-len: 3
    min-occurrences: 2

linters:
  enable-all: true
  disable:
    - maligned
    - lll
    - gochecknoinits
    - gochecknoglobals
    - nlreturn
    - testpackage
    - wrapcheck
    - gomnd
    - exhaustive
    - exhaustivestruct
    - goerr113
    - wsl
    - whitespace
    - gofumpt
    - godot
    - nestif
    - godox
    - funlen
    - gci
    - gocognit
    - paralleltest
    - thelper
    - ifshort
    - gomoddirectives
    - cyclop
    - forcetypeassert
    - ireturn
    - tagliatelle
    - varnamelen
    - goimports
    - tenv
    - golint
    - exhaustruct
    - nilnil
    - nonamedreturns
    - nosnakecase
74 client/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md generated vendored Normal file
@@ -0,0 +1,74 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
202 client/vendor/github.com/go-openapi/swag/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
21
client/vendor/github.com/go-openapi/swag/README.md
generated
vendored
Normal file
@@ -0,0 +1,21 @@
# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)

[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
[](http://godoc.org/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)

Contains a bunch of helper functions for go-openapi and go-swagger projects.

You may also use it standalone for your projects.

* convert between value and pointers for builtin types
* convert from string to builtin types (wraps strconv)
* fast json concatenation
* search in path
* load from file or http
* name mangling


This repo has only few dependencies outside of the standard library:

* YAML utilities depend on gopkg.in/yaml.v2
208
client/vendor/github.com/go-openapi/swag/convert.go
generated
vendored
Normal file
@@ -0,0 +1,208 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package swag
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
|
||||
const (
|
||||
maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1
|
||||
minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1
|
||||
epsilon float64 = 1e-9
|
||||
)
|
||||
|
||||
// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive
|
||||
func IsFloat64AJSONInteger(f float64) bool {
|
||||
if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat {
|
||||
return false
|
||||
}
|
||||
fa := math.Abs(f)
|
||||
g := float64(uint64(f))
|
||||
ga := math.Abs(g)
|
||||
|
||||
diff := math.Abs(f - g)
|
||||
|
||||
// more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases
|
||||
switch {
|
||||
case f == g: // best case
|
||||
return true
|
||||
case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case
|
||||
return true
|
||||
case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values
|
||||
return diff < (epsilon * math.SmallestNonzeroFloat64)
|
||||
}
|
||||
// check the relative error
|
||||
return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon
|
||||
}
|
||||
|
||||
var evaluatesAsTrue map[string]struct{}
|
||||
|
||||
func init() {
|
||||
evaluatesAsTrue = map[string]struct{}{
|
||||
"true": {},
|
||||
"1": {},
|
||||
"yes": {},
|
||||
"ok": {},
|
||||
"y": {},
|
||||
"on": {},
|
||||
"selected": {},
|
||||
"checked": {},
|
||||
"t": {},
|
||||
"enabled": {},
|
||||
}
|
||||
}
|
||||
|
||||
// ConvertBool turn a string into a boolean
|
||||
func ConvertBool(str string) (bool, error) {
|
||||
_, ok := evaluatesAsTrue[strings.ToLower(str)]
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
// ConvertFloat32 turn a string into a float32
|
||||
func ConvertFloat32(str string) (float32, error) {
|
||||
f, err := strconv.ParseFloat(str, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return float32(f), nil
|
||||
}
|
||||
|
||||
// ConvertFloat64 turn a string into a float64
|
||||
func ConvertFloat64(str string) (float64, error) {
|
||||
return strconv.ParseFloat(str, 64)
|
||||
}
|
||||
|
||||
// ConvertInt8 turn a string into an int8
|
||||
func ConvertInt8(str string) (int8, error) {
|
||||
i, err := strconv.ParseInt(str, 10, 8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int8(i), nil
|
||||
}
|
||||
|
||||
// ConvertInt16 turn a string into an int16
|
||||
func ConvertInt16(str string) (int16, error) {
|
||||
i, err := strconv.ParseInt(str, 10, 16)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int16(i), nil
|
||||
}
|
||||
|
||||
// ConvertInt32 turn a string into an int32
|
||||
func ConvertInt32(str string) (int32, error) {
|
||||
i, err := strconv.ParseInt(str, 10, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int32(i), nil
|
||||
}
|
||||
|
||||
// ConvertInt64 turn a string into an int64
|
||||
func ConvertInt64(str string) (int64, error) {
|
||||
return strconv.ParseInt(str, 10, 64)
|
||||
}
|
||||
|
||||
// ConvertUint8 turn a string into an uint8
|
||||
func ConvertUint8(str string) (uint8, error) {
|
||||
i, err := strconv.ParseUint(str, 10, 8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint8(i), nil
|
||||
}
|
||||
|
||||
// ConvertUint16 turn a string into an uint16
|
||||
func ConvertUint16(str string) (uint16, error) {
|
||||
i, err := strconv.ParseUint(str, 10, 16)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint16(i), nil
|
||||
}
|
||||
|
||||
// ConvertUint32 turn a string into an uint32
|
||||
func ConvertUint32(str string) (uint32, error) {
|
||||
i, err := strconv.ParseUint(str, 10, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint32(i), nil
|
||||
}
|
||||
|
||||
// ConvertUint64 turn a string into an uint64
|
||||
func ConvertUint64(str string) (uint64, error) {
|
||||
return strconv.ParseUint(str, 10, 64)
|
||||
}
|
||||
|
||||
// FormatBool turns a boolean into a string
|
||||
func FormatBool(value bool) string {
|
||||
return strconv.FormatBool(value)
|
||||
}
|
||||
|
||||
// FormatFloat32 turns a float32 into a string
|
||||
func FormatFloat32(value float32) string {
|
||||
return strconv.FormatFloat(float64(value), 'f', -1, 32)
|
||||
}
|
||||
|
||||
// FormatFloat64 turns a float64 into a string
|
||||
func FormatFloat64(value float64) string {
|
||||
return strconv.FormatFloat(value, 'f', -1, 64)
|
||||
}
|
||||
|
||||
// FormatInt8 turns an int8 into a string
|
||||
func FormatInt8(value int8) string {
|
||||
return strconv.FormatInt(int64(value), 10)
|
||||
}
|
||||
|
||||
// FormatInt16 turns an int16 into a string
|
||||
func FormatInt16(value int16) string {
|
||||
return strconv.FormatInt(int64(value), 10)
|
||||
}
|
||||
|
||||
// FormatInt32 turns an int32 into a string
|
||||
func FormatInt32(value int32) string {
|
||||
return strconv.Itoa(int(value))
|
||||
}
|
||||
|
||||
// FormatInt64 turns an int64 into a string
|
||||
func FormatInt64(value int64) string {
|
||||
return strconv.FormatInt(value, 10)
|
||||
}
|
||||
|
||||
// FormatUint8 turns an uint8 into a string
|
||||
func FormatUint8(value uint8) string {
|
||||
return strconv.FormatUint(uint64(value), 10)
|
||||
}
|
||||
|
||||
// FormatUint16 turns an uint16 into a string
|
||||
func FormatUint16(value uint16) string {
|
||||
return strconv.FormatUint(uint64(value), 10)
|
||||
}
|
||||
|
||||
// FormatUint32 turns an uint32 into a string
|
||||
func FormatUint32(value uint32) string {
|
||||
return strconv.FormatUint(uint64(value), 10)
|
||||
}
|
||||
|
||||
// FormatUint64 turns an uint64 into a string
|
||||
func FormatUint64(value uint64) string {
|
||||
return strconv.FormatUint(value, 10)
|
||||
}
|
730
client/vendor/github.com/go-openapi/swag/convert_types.go
generated
vendored
Normal file
@@ -0,0 +1,730 @@
|
||||
package swag
|
||||
|
||||
import "time"
|
||||
|
||||
// This file was taken from the aws go sdk
|
||||
|
||||
// String returns a pointer to of the string value passed in.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// StringValue returns the value of the string pointer passed in or
|
||||
// "" if the pointer is nil.
|
||||
func StringValue(v *string) string {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// StringSlice converts a slice of string values into a slice of
|
||||
// string pointers
|
||||
func StringSlice(src []string) []*string {
|
||||
dst := make([]*string, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringValueSlice converts a slice of string pointers into a slice of
|
||||
// string values
|
||||
func StringValueSlice(src []*string) []string {
|
||||
dst := make([]string, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringMap converts a string map of string values into a string
|
||||
// map of string pointers
|
||||
func StringMap(src map[string]string) map[string]*string {
|
||||
dst := make(map[string]*string)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringValueMap converts a string map of string pointers into a string
|
||||
// map of string values
|
||||
func StringValueMap(src map[string]*string) map[string]string {
|
||||
dst := make(map[string]string)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Bool returns a pointer to of the bool value passed in.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// BoolValue returns the value of the bool pointer passed in or
|
||||
// false if the pointer is nil.
|
||||
func BoolValue(v *bool) bool {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// BoolSlice converts a slice of bool values into a slice of
|
||||
// bool pointers
|
||||
func BoolSlice(src []bool) []*bool {
|
||||
dst := make([]*bool, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolValueSlice converts a slice of bool pointers into a slice of
|
||||
// bool values
|
||||
func BoolValueSlice(src []*bool) []bool {
|
||||
dst := make([]bool, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolMap converts a string map of bool values into a string
|
||||
// map of bool pointers
|
||||
func BoolMap(src map[string]bool) map[string]*bool {
|
||||
dst := make(map[string]*bool)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolValueMap converts a string map of bool pointers into a string
|
||||
// map of bool values
|
||||
func BoolValueMap(src map[string]*bool) map[string]bool {
|
||||
dst := make(map[string]bool)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int returns a pointer to of the int value passed in.
|
||||
func Int(v int) *int {
|
||||
return &v
|
||||
}
|
||||
|
||||
// IntValue returns the value of the int pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func IntValue(v *int) int {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// IntSlice converts a slice of int values into a slice of
|
||||
// int pointers
|
||||
func IntSlice(src []int) []*int {
|
||||
dst := make([]*int, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntValueSlice converts a slice of int pointers into a slice of
|
||||
// int values
|
||||
func IntValueSlice(src []*int) []int {
|
||||
dst := make([]int, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntMap converts a string map of int values into a string
|
||||
// map of int pointers
|
||||
func IntMap(src map[string]int) map[string]*int {
|
||||
dst := make(map[string]*int)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntValueMap converts a string map of int pointers into a string
|
||||
// map of int values
|
||||
func IntValueMap(src map[string]*int) map[string]int {
|
||||
dst := make(map[string]int)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int32 returns a pointer to of the int32 value passed in.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32Value returns the value of the int32 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Int32Value(v *int32) int32 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Int32Slice converts a slice of int32 values into a slice of
|
||||
// int32 pointers
|
||||
func Int32Slice(src []int32) []*int32 {
|
||||
dst := make([]*int32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int32ValueSlice converts a slice of int32 pointers into a slice of
|
||||
// int32 values
|
||||
func Int32ValueSlice(src []*int32) []int32 {
|
||||
dst := make([]int32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int32Map converts a string map of int32 values into a string
|
||||
// map of int32 pointers
|
||||
func Int32Map(src map[string]int32) map[string]*int32 {
|
||||
dst := make(map[string]*int32)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int32ValueMap converts a string map of int32 pointers into a string
|
||||
// map of int32 values
|
||||
func Int32ValueMap(src map[string]*int32) map[string]int32 {
|
||||
dst := make(map[string]int32)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64 returns a pointer to of the int64 value passed in.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int64Value returns the value of the int64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Int64Value(v *int64) int64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Int64Slice converts a slice of int64 values into a slice of
|
||||
// int64 pointers
|
||||
func Int64Slice(src []int64) []*int64 {
|
||||
dst := make([]*int64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64ValueSlice converts a slice of int64 pointers into a slice of
|
||||
// int64 values
|
||||
func Int64ValueSlice(src []*int64) []int64 {
|
||||
dst := make([]int64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64Map converts a string map of int64 values into a string
|
||||
// map of int64 pointers
|
||||
func Int64Map(src map[string]int64) map[string]*int64 {
|
||||
dst := make(map[string]*int64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64ValueMap converts a string map of int64 pointers into a string
|
||||
// map of int64 values
|
||||
func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
||||
dst := make(map[string]int64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint16 returns a pointer to of the uint16 value passed in.
|
||||
func Uint16(v uint16) *uint16 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint16Value returns the value of the uint16 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Uint16Value(v *uint16) uint16 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Uint16Slice converts a slice of uint16 values into a slice of
|
||||
// uint16 pointers
|
||||
func Uint16Slice(src []uint16) []*uint16 {
|
||||
dst := make([]*uint16, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
|
||||
// uint16 values
|
||||
func Uint16ValueSlice(src []*uint16) []uint16 {
|
||||
dst := make([]uint16, len(src))
|
||||
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint16Map converts a string map of uint16 values into a string
|
||||
// map of uint16 pointers
|
||||
func Uint16Map(src map[string]uint16) map[string]*uint16 {
|
||||
dst := make(map[string]*uint16)
|
||||
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint16ValueMap converts a string map of uint16 pointers into a string
|
||||
// map of uint16 values
|
||||
func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
|
||||
dst := make(map[string]uint16)
|
||||
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint returns a pointer to of the uint value passed in.
|
||||
func Uint(v uint) *uint {
|
||||
return &v
|
||||
}
|
||||
|
||||
// UintValue returns the value of the uint pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func UintValue(v *uint) uint {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// UintSlice converts a slice of uint values into a slice of
|
||||
// uint pointers
|
||||
func UintSlice(src []uint) []*uint {
|
||||
dst := make([]*uint, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// UintValueSlice converts a slice of uint pointers into a slice of
|
||||
// uint values
|
||||
func UintValueSlice(src []*uint) []uint {
|
||||
dst := make([]uint, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// UintMap converts a string map of uint values into a string
|
||||
// map of uint pointers
|
||||
func UintMap(src map[string]uint) map[string]*uint {
|
||||
dst := make(map[string]*uint)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// UintValueMap converts a string map of uint pointers into a string
|
||||
// map of uint values
|
||||
func UintValueMap(src map[string]*uint) map[string]uint {
|
||||
dst := make(map[string]uint)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint32 returns a pointer to of the uint32 value passed in.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32Value returns the value of the uint32 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Uint32Value(v *uint32) uint32 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Uint32Slice converts a slice of uint32 values into a slice of
|
||||
// uint32 pointers
|
||||
func Uint32Slice(src []uint32) []*uint32 {
|
||||
dst := make([]*uint32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
|
||||
// uint32 values
|
||||
func Uint32ValueSlice(src []*uint32) []uint32 {
|
||||
dst := make([]uint32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint32Map converts a string map of uint32 values into a string
|
||||
// map of uint32 pointers
|
||||
func Uint32Map(src map[string]uint32) map[string]*uint32 {
|
||||
dst := make(map[string]*uint32)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint32ValueMap converts a string map of uint32 pointers into a string
|
||||
// map of uint32 values
|
||||
func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
|
||||
dst := make(map[string]uint32)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint64 returns a pointer to of the uint64 value passed in.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64Value returns the value of the uint64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Uint64Value(v *uint64) uint64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Uint64Slice converts a slice of uint64 values into a slice of
|
||||
// uint64 pointers
|
||||
func Uint64Slice(src []uint64) []*uint64 {
|
||||
dst := make([]*uint64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
|
||||
// uint64 values
|
||||
func Uint64ValueSlice(src []*uint64) []uint64 {
|
||||
dst := make([]uint64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint64Map converts a string map of uint64 values into a string
|
||||
// map of uint64 pointers
|
||||
func Uint64Map(src map[string]uint64) map[string]*uint64 {
|
||||
dst := make(map[string]*uint64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint64ValueMap converts a string map of uint64 pointers into a string
|
||||
// map of uint64 values
|
||||
func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
|
||||
dst := make(map[string]uint64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32 returns a pointer to of the float32 value passed in.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32Value returns the value of the float32 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Float32Value(v *float32) float32 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Float32Slice converts a slice of float32 values into a slice of
|
||||
// float32 pointers
|
||||
func Float32Slice(src []float32) []*float32 {
|
||||
dst := make([]*float32, len(src))
|
||||
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32ValueSlice converts a slice of float32 pointers into a slice of
|
||||
// float32 values
|
||||
func Float32ValueSlice(src []*float32) []float32 {
|
||||
dst := make([]float32, len(src))
|
||||
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32Map converts a string map of float32 values into a string
|
||||
// map of float32 pointers
|
||||
func Float32Map(src map[string]float32) map[string]*float32 {
|
||||
dst := make(map[string]*float32)
|
||||
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32ValueMap converts a string map of float32 pointers into a string
|
||||
// map of float32 values
|
||||
func Float32ValueMap(src map[string]*float32) map[string]float32 {
|
||||
dst := make(map[string]float32)
|
||||
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64 returns a pointer to of the float64 value passed in.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64Value returns the value of the float64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Float64Value(v *float64) float64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Float64Slice converts a slice of float64 values into a slice of
|
||||
// float64 pointers
|
||||
func Float64Slice(src []float64) []*float64 {
|
||||
dst := make([]*float64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64ValueSlice converts a slice of float64 pointers into a slice of
|
||||
// float64 values
|
||||
func Float64ValueSlice(src []*float64) []float64 {
|
||||
dst := make([]float64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64Map converts a string map of float64 values into a string
|
||||
// map of float64 pointers
|
||||
func Float64Map(src map[string]float64) map[string]*float64 {
|
||||
dst := make(map[string]*float64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64ValueMap converts a string map of float64 pointers into a string
|
||||
// map of float64 values
|
||||
func Float64ValueMap(src map[string]*float64) map[string]float64 {
|
||||
dst := make(map[string]float64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Time returns a pointer to of the time.Time value passed in.
|
||||
func Time(v time.Time) *time.Time {
|
||||
return &v
|
||||
}
|
||||
|
||||
// TimeValue returns the value of the time.Time pointer passed in or
|
||||
// time.Time{} if the pointer is nil.
|
||||
func TimeValue(v *time.Time) time.Time {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// TimeSlice converts a slice of time.Time values into a slice of
|
||||
// time.Time pointers
|
||||
func TimeSlice(src []time.Time) []*time.Time {
|
||||
dst := make([]*time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueSlice converts a slice of time.Time pointers into a slice of
|
||||
// time.Time values
|
||||
func TimeValueSlice(src []*time.Time) []time.Time {
|
||||
dst := make([]time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeMap converts a string map of time.Time values into a string
|
||||
// map of time.Time pointers
|
||||
func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
||||
dst := make(map[string]*time.Time)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueMap converts a string map of time.Time pointers into a string
|
||||
// map of time.Time values
|
||||
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
||||
dst := make(map[string]time.Time)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
31
client/vendor/github.com/go-openapi/swag/doc.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package swag contains a bunch of helper functions for go-openapi and go-swagger projects.

You may also use it standalone for your projects.

  - convert between value and pointers for builtin types
  - convert from string to builtin types (wraps strconv)
  - fast json concatenation
  - search in path
  - load from file or http
  - name mangling

This repo has only few dependencies outside of the standard library:

  - YAML utilities depend on gopkg.in/yaml.v2
*/
package swag
33
client/vendor/github.com/go-openapi/swag/file.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import "mime/multipart"

// File represents an uploaded file.
type File struct {
	Data   multipart.File
	Header *multipart.FileHeader
}

// Read bytes from the file
func (f *File) Read(p []byte) (n int, err error) {
	return f.Data.Read(p)
}

// Close the file
func (f *File) Close() error {
	return f.Data.Close()
}
312
client/vendor/github.com/go-openapi/swag/json.go
generated
vendored
Normal file
@@ -0,0 +1,312 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package swag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/mailru/easyjson/jlexer"
|
||||
"github.com/mailru/easyjson/jwriter"
|
||||
)
|
||||
|
||||
// nullJSON represents a JSON object with null type
|
||||
var nullJSON = []byte("null")
|
||||
|
||||
// DefaultJSONNameProvider the default cache for types
|
||||
var DefaultJSONNameProvider = NewNameProvider()
|
||||
|
||||
const comma = byte(',')
|
||||
|
||||
var closers map[byte]byte
|
||||
|
||||
func init() {
|
||||
closers = map[byte]byte{
|
||||
'{': '}',
|
||||
'[': ']',
|
||||
}
|
||||
}
|
||||
|
||||
type ejMarshaler interface {
|
||||
MarshalEasyJSON(w *jwriter.Writer)
|
||||
}
|
||||
|
||||
type ejUnmarshaler interface {
|
||||
UnmarshalEasyJSON(w *jlexer.Lexer)
|
||||
}
|
||||
|
||||
// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaler
|
||||
// so it takes the fastest option available.
|
||||
func WriteJSON(data interface{}) ([]byte, error) {
|
||||
if d, ok := data.(ejMarshaler); ok {
|
||||
jw := new(jwriter.Writer)
|
||||
d.MarshalEasyJSON(jw)
|
||||
return jw.BuildBytes()
|
||||
}
|
||||
if d, ok := data.(json.Marshaler); ok {
|
||||
return d.MarshalJSON()
|
||||
}
|
||||
return json.Marshal(data)
|
||||
}
|
||||
|
||||
// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaler
|
||||
// so it takes the fastest option available
|
||||
func ReadJSON(data []byte, value interface{}) error {
|
||||
trimmedData := bytes.Trim(data, "\x00")
|
||||
if d, ok := value.(ejUnmarshaler); ok {
|
||||
jl := &jlexer.Lexer{Data: trimmedData}
|
||||
d.UnmarshalEasyJSON(jl)
|
||||
return jl.Error()
|
||||
}
|
||||
if d, ok := value.(json.Unmarshaler); ok {
|
||||
return d.UnmarshalJSON(trimmedData)
|
||||
}
|
||||
return json.Unmarshal(trimmedData, value)
|
||||
}
|
||||
|
||||
// DynamicJSONToStruct converts an untyped json structure into a struct
|
||||
func DynamicJSONToStruct(data interface{}, target interface{}) error {
|
||||
// TODO: convert straight to a json typed map (mergo + iterate?)
|
||||
b, err := WriteJSON(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ReadJSON(b, target)
|
||||
}
|
||||
|
||||
// ConcatJSON concatenates multiple json objects efficiently
|
||||
func ConcatJSON(blobs ...[]byte) []byte {
|
||||
if len(blobs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
last := len(blobs) - 1
|
||||
for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) {
|
||||
// strips trailing null objects
|
||||
last--
|
||||
if last < 0 {
|
||||
// there was nothing but "null"s or nil...
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if last == 0 {
|
||||
return blobs[0]
|
||||
}
|
||||
|
||||
var opening, closing byte
|
||||
var idx, a int
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
for i, b := range blobs[:last+1] {
|
||||
if b == nil || bytes.Equal(b, nullJSON) {
|
||||
// a null object is in the list: skip it
|
||||
continue
|
||||
}
|
||||
if len(b) > 0 && opening == 0 { // is this an array or an object?
|
||||
opening, closing = b[0], closers[b[0]]
|
||||
}
|
||||
|
||||
if opening != '{' && opening != '[' {
|
||||
continue // don't know how to concatenate non container objects
|
||||
}
|
||||
|
||||
if len(b) < 3 { // yep empty but also the last one, so closing this thing
|
||||
if i == last && a > 0 {
|
||||
if err := buf.WriteByte(closing); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
idx = 0
|
||||
if a > 0 { // we need to join with a comma for everything beyond the first non-empty item
|
||||
if err := buf.WriteByte(comma); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
idx = 1 // this is not the first or the last so we want to drop the leading bracket
|
||||
}
|
||||
|
||||
if i != last { // not the last one, strip brackets
|
||||
if _, err := buf.Write(b[idx : len(b)-1]); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
} else { // last one, strip only the leading bracket
|
||||
if _, err := buf.Write(b[idx:]); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
a++
|
||||
}
|
||||
// somehow it ended up being empty, so provide a default value
|
||||
if buf.Len() == 0 {
|
||||
if err := buf.WriteByte(opening); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
if err := buf.WriteByte(closing); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// ToDynamicJSON turns an object into a properly JSON typed structure
|
||||
func ToDynamicJSON(data interface{}) interface{} {
|
||||
// TODO: convert straight to a json typed map (mergo + iterate?)
|
||||
b, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
var res interface{}
|
||||
if err := json.Unmarshal(b, &res); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// FromDynamicJSON turns an object into a properly JSON typed structure
|
||||
func FromDynamicJSON(data, target interface{}) error {
|
||||
b, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
return json.Unmarshal(b, target)
|
||||
}
|
||||
|
||||
// NameProvider represents an object capable of translating from go property names
|
||||
// to json property names
|
||||
// This type is thread-safe.
|
||||
type NameProvider struct {
|
||||
lock *sync.Mutex
|
||||
index map[reflect.Type]nameIndex
|
||||
}
|
||||
|
||||
type nameIndex struct {
|
||||
jsonNames map[string]string
|
||||
goNames map[string]string
|
||||
}
|
||||
|
||||
// NewNameProvider creates a new name provider
|
||||
func NewNameProvider() *NameProvider {
|
||||
return &NameProvider{
|
||||
lock: &sync.Mutex{},
|
||||
index: make(map[reflect.Type]nameIndex),
|
||||
}
|
||||
}
|
||||
|
||||
func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) {
|
||||
for i := 0; i < tpe.NumField(); i++ {
|
||||
targetDes := tpe.Field(i)
|
||||
|
||||
if targetDes.PkgPath != "" { // unexported
|
||||
continue
|
||||
}
|
||||
|
||||
if targetDes.Anonymous { // walk embedded structures tree down first
|
||||
buildnameIndex(targetDes.Type, idx, reverseIdx)
|
||||
continue
|
||||
}
|
||||
|
||||
if tag := targetDes.Tag.Get("json"); tag != "" {
|
||||
|
||||
parts := strings.Split(tag, ",")
|
||||
if len(parts) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
nm := parts[0]
|
||||
if nm == "-" {
|
||||
continue
|
||||
}
|
||||
if nm == "" { // empty string means we want to use the Go name
|
||||
nm = targetDes.Name
|
||||
}
|
||||
|
||||
idx[nm] = targetDes.Name
|
||||
reverseIdx[targetDes.Name] = nm
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newNameIndex(tpe reflect.Type) nameIndex {
|
||||
var idx = make(map[string]string, tpe.NumField())
|
||||
var reverseIdx = make(map[string]string, tpe.NumField())
|
||||
|
||||
buildnameIndex(tpe, idx, reverseIdx)
|
||||
return nameIndex{jsonNames: idx, goNames: reverseIdx}
|
||||
}
|
||||
|
||||
// GetJSONNames gets all the json property names for a type
|
||||
func (n *NameProvider) GetJSONNames(subject interface{}) []string {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
|
||||
names, ok := n.index[tpe]
|
||||
if !ok {
|
||||
names = n.makeNameIndex(tpe)
|
||||
}
|
||||
|
||||
res := make([]string, 0, len(names.jsonNames))
|
||||
for k := range names.jsonNames {
|
||||
res = append(res, k)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// GetJSONName gets the json name for a go property name
|
||||
func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) {
|
||||
tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
|
||||
return n.GetJSONNameForType(tpe, name)
|
||||
}
|
||||
|
||||
// GetJSONNameForType gets the json name for a go property name on a given type
|
||||
func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
names, ok := n.index[tpe]
|
||||
if !ok {
|
||||
names = n.makeNameIndex(tpe)
|
||||
}
|
||||
nme, ok := names.goNames[name]
|
||||
return nme, ok
|
||||
}
|
||||
|
||||
func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex {
|
||||
names := newNameIndex(tpe)
|
||||
n.index[tpe] = names
|
||||
return names
|
||||
}
|
||||
|
||||
// GetGoName gets the go name for a json property name
|
||||
func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) {
|
||||
tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
|
||||
return n.GetGoNameForType(tpe, name)
|
||||
}
|
||||
|
||||
// GetGoNameForType gets the go name for a given type for a json property name
|
||||
func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
names, ok := n.index[tpe]
|
||||
if !ok {
|
||||
names = n.makeNameIndex(tpe)
|
||||
}
|
||||
nme, ok := names.jsonNames[name]
|
||||
return nme, ok
|
||||
}
|
121
client/vendor/github.com/go-openapi/swag/loading.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"
)

// LoadHTTPTimeout the default timeout for load requests
var LoadHTTPTimeout = 30 * time.Second

// LoadHTTPBasicAuthUsername the username to use when load requests require basic auth
var LoadHTTPBasicAuthUsername = ""

// LoadHTTPBasicAuthPassword the password to use when load requests require basic auth
var LoadHTTPBasicAuthPassword = ""

// LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests
var LoadHTTPCustomHeaders = map[string]string{}

// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
func LoadFromFileOrHTTP(path string) ([]byte, error) {
	return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
}

// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
	return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
}

// LoadStrategy returns a loader function for a given path or uri
func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
	if strings.HasPrefix(path, "http") {
		return remote
	}
	return func(pth string) ([]byte, error) {
		upth, err := pathUnescape(pth)
		if err != nil {
			return nil, err
		}

		if strings.HasPrefix(pth, `file://`) {
			if runtime.GOOS == "windows" {
				// support for canonical file URIs on windows.
				// Zero tolerance here for dodgy URIs.
				u, _ := url.Parse(upth)
				if u.Host != "" {
					// assume UNC name (volume share)
					// file://host/share/folder\... ==> \\host\share\path\folder
					// NOTE: UNC port not yet supported
					upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
				} else {
					// file:///c:/folder/... ==> just remove the leading slash
					upth = strings.TrimPrefix(upth, `file:///`)
				}
			} else {
				upth = strings.TrimPrefix(upth, `file://`)
			}
		}

		return local(filepath.FromSlash(upth))
	}
}

func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) {
	return func(path string) ([]byte, error) {
		client := &http.Client{Timeout: timeout}
		req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx
		if err != nil {
			return nil, err
		}

		if LoadHTTPBasicAuthUsername != "" && LoadHTTPBasicAuthPassword != "" {
			req.SetBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword)
		}

		for key, val := range LoadHTTPCustomHeaders {
			req.Header.Set(key, val)
		}

		resp, err := client.Do(req)
		defer func() {
			if resp != nil {
				if e := resp.Body.Close(); e != nil {
					log.Println(e)
				}
			}
		}()
		if err != nil {
			return nil, err
		}

		if resp.StatusCode != http.StatusOK {
			return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
		}

		return io.ReadAll(resp.Body)
	}
}
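Not part of the diff: a minimal sketch of how a consumer might call the loaders above, assuming the vendored package is imported as github.com/go-openapi/swag; the URL and credentials are placeholders.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-openapi/swag"
)

func main() {
	// Package-level settings apply to every HTTP load (placeholder values).
	swag.LoadHTTPBasicAuthUsername = "user"
	swag.LoadHTTPBasicAuthPassword = "secret"
	swag.LoadHTTPCustomHeaders["Accept"] = "application/json"

	// Paths starting with "http" use the HTTP loader; anything else goes through os.ReadFile.
	doc, err := swag.LoadFromFileOrHTTPWithTimeout("https://example.com/openapi.json", 10*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(doc), "bytes loaded")
}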
87 client/vendor/github.com/go-openapi/swag/name_lexem.go generated vendored Normal file
@@ -0,0 +1,87 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import "unicode"

type (
	nameLexem interface {
		GetUnsafeGoName() string
		GetOriginal() string
		IsInitialism() bool
	}

	initialismNameLexem struct {
		original          string
		matchedInitialism string
	}

	casualNameLexem struct {
		original string
	}
)

func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
	return &initialismNameLexem{
		original:          original,
		matchedInitialism: matchedInitialism,
	}
}

func newCasualNameLexem(original string) *casualNameLexem {
	return &casualNameLexem{
		original: original,
	}
}

func (l *initialismNameLexem) GetUnsafeGoName() string {
	return l.matchedInitialism
}

func (l *casualNameLexem) GetUnsafeGoName() string {
	var first rune
	var rest string
	for i, orig := range l.original {
		if i == 0 {
			first = orig
			continue
		}
		if i > 0 {
			rest = l.original[i:]
			break
		}
	}
	if len(l.original) > 1 {
		return string(unicode.ToUpper(first)) + lower(rest)
	}

	return l.original
}

func (l *initialismNameLexem) GetOriginal() string {
	return l.original
}

func (l *casualNameLexem) GetOriginal() string {
	return l.original
}

func (l *initialismNameLexem) IsInitialism() bool {
	return true
}

func (l *casualNameLexem) IsInitialism() bool {
	return false
}
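Not part of the diff: the lexem types above are unexported, so the following sketch only illustrates their behaviour as if it lived inside package swag (for example in a test file); demoNameLexems is a hypothetical helper.

package swag

import "fmt"

// demoNameLexems is a hypothetical, package-internal illustration of the lexem types.
func demoNameLexems() {
	// An initialism lexem reports the matched initialism as its Go name.
	httpLexem := newInitialismNameLexem("http", "HTTP")
	fmt.Println(httpLexem.GetUnsafeGoName(), httpLexem.IsInitialism()) // HTTP true

	// A casual lexem upper-cases the first rune and lower-cases the rest.
	tagLexem := newCasualNameLexem("tags")
	fmt.Println(tagLexem.GetUnsafeGoName(), tagLexem.IsInitialism()) // Tags false
}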
38 client/vendor/github.com/go-openapi/swag/net.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import (
	"net"
	"strconv"
)

// SplitHostPort splits a network address into a host and a port.
// The port is -1 when there is no port to be found
func SplitHostPort(addr string) (host string, port int, err error) {
	h, p, err := net.SplitHostPort(addr)
	if err != nil {
		return "", -1, err
	}
	if p == "" {
		return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr}
	}

	pi, err := strconv.Atoi(p)
	if err != nil {
		return "", -1, err
	}
	return h, pi, nil
}
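Not part of the diff: a quick sketch of SplitHostPort, which, unlike net.SplitHostPort, returns the port as an int and rejects empty ports; the addresses are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/swag"
)

func main() {
	host, port, err := swag.SplitHostPort("localhost:8080")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(host, port) // localhost 8080

	// An address with an empty port is an error; the port comes back as -1.
	if _, p, err := swag.SplitHostPort("localhost:"); err != nil {
		fmt.Println(p, err) // -1 ... missing port in address ...
	}
}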
59 client/vendor/github.com/go-openapi/swag/path.go generated vendored Normal file
@@ -0,0 +1,59 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import (
	"os"
	"path/filepath"
	"runtime"
	"strings"
)

const (
	// GOPATHKey represents the env key for gopath
	GOPATHKey = "GOPATH"
)

// FindInSearchPath finds a package in a provided lists of paths
func FindInSearchPath(searchPath, pkg string) string {
	pathsList := filepath.SplitList(searchPath)
	for _, path := range pathsList {
		if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil {
			if _, err := os.Stat(evaluatedPath); err == nil {
				return evaluatedPath
			}
		}
	}
	return ""
}

// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT
func FindInGoSearchPath(pkg string) string {
	return FindInSearchPath(FullGoSearchPath(), pkg)
}

// FullGoSearchPath gets the search paths for finding packages
func FullGoSearchPath() string {
	allPaths := os.Getenv(GOPATHKey)
	if allPaths == "" {
		allPaths = filepath.Join(os.Getenv("HOME"), "go")
	}
	if allPaths != "" {
		allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":")
	} else {
		allPaths = runtime.GOROOT()
	}
	return allPaths
}
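Not part of the diff: a small sketch of the GOPATH helpers above; the package path passed to FindInGoSearchPath is only an example.

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// FindInGoSearchPath searches $GOPATH (falling back to $HOME/go) and $GOROOT
	// for a package directory under src/.
	if dir := swag.FindInGoSearchPath("github.com/go-openapi/swag"); dir != "" {
		fmt.Println("found at", dir)
	} else {
		fmt.Println("not found in", swag.FullGoSearchPath())
	}
}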
Some files were not shown because too many files have changed in this diff.