Update lib

xing-yang
2023-11-15 14:58:29 +00:00
parent be851d9b8c
commit ff4faefbd6
417 changed files with 2879 additions and 1450 deletions

go.mod

@@ -8,7 +8,7 @@ require (
github.com/fsnotify/fsnotify v1.7.0
github.com/golang/mock v1.6.0
github.com/google/gofuzz v1.2.0
github.com/kubernetes-csi/csi-lib-utils v0.15.0
github.com/kubernetes-csi/csi-lib-utils v0.16.0
github.com/kubernetes-csi/csi-test/v5 v5.1.0
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0
github.com/prometheus/client_golang v1.17.0
@@ -31,7 +31,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
@@ -40,7 +40,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -54,18 +54,18 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.41.0 // indirect
go.opentelemetry.io/otel v1.15.0 // indirect
go.opentelemetry.io/otel/metric v0.38.0 // indirect
go.opentelemetry.io/otel/trace v1.15.0 // indirect
golang.org/x/net v0.17.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
go.opentelemetry.io/otel v1.20.0 // indirect
go.opentelemetry.io/otel/metric v1.20.0 // indirect
go.opentelemetry.io/otel/trace v1.20.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/oauth2 v0.12.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/sys v0.14.0 // indirect
golang.org/x/term v0.14.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum

@@ -23,8 +23,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@@ -47,8 +47,8 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -71,8 +71,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/csi-lib-utils v0.15.0 h1:YTMO6WilRUmjGh5/73kF4KjNcXev+V37O4bx8Uoxy5A=
github.com/kubernetes-csi/csi-lib-utils v0.15.0/go.mod h1:fsoR7g1fOfl1z0WDpA1WvWPtt4oVvgzChgSUgR3JWDw=
github.com/kubernetes-csi/csi-lib-utils v0.16.0 h1:LXCvkhXHtFOkl7LoDqFdho/MuebccZqWxLwhKiRGiBg=
github.com/kubernetes-csi/csi-lib-utils v0.16.0/go.mod h1:fp1Oik+45tP2o4X9SD/SBWXLTQYT9wtLxGasBE3+vBI=
github.com/kubernetes-csi/csi-test/v5 v5.1.0 h1:8UxFRH0W8C4RbppKYYJeOJ506C7ybngKZA5GabGgJec=
github.com/kubernetes-csi/csi-test/v5 v5.1.0/go.mod h1:LoAh2XHbXcKnCoM1WgEyviUXiLmTeCmFTsjzaNloL3k=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -113,18 +113,18 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.41.0 h1:pWlIooxHVVdetyXFDsuzfqV42lXVIDmVGBCHeaXzDyI=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.41.0/go.mod h1:YjmsSWM1VTcWXFSgyrmLADPMZZohioz9onjgkikk59w=
go.opentelemetry.io/otel v1.15.0 h1:NIl24d4eiLJPM0vKn4HjLYM+UZf6gSfi9Z+NmCxkWbk=
go.opentelemetry.io/otel v1.15.0/go.mod h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfAE3zbek=
go.opentelemetry.io/otel/metric v0.38.0 h1:vv/Nv/44S3GzMMmeUhaesBKsAenE6xLkTVWL+zuv30w=
go.opentelemetry.io/otel/metric v0.38.0/go.mod h1:uAtxN5hl8aXh5irD8afBtSwQU5Zjg64WWSz6KheZxBg=
go.opentelemetry.io/otel/trace v1.15.0 h1:5Fwje4O2ooOxkfyqI/kJwxWotggDLix4BSAvpE1wlpo=
go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -137,8 +137,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -151,16 +151,16 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -175,8 +175,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=


@@ -1,6 +1,7 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
@@ -73,6 +74,29 @@ received:
If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.
When the Go developers started developing such an interface with
[slog](https://github.com/golang/go/issues/56345), they adopted some of the
logr design but also left out some parts and changed others:
| Feature | logr | slog |
|---------|------|------|
| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
| Low-level API | `LogSink` | `Handler` |
| Stack unwinding | done by `LogSink` | done by `Logger` |
| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
| Passing logger via context | `NewContext`, `FromContext` | no API |
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.
### Inspiration
Before you consider this package, please read [this blog post by the
@@ -118,6 +142,91 @@ There are implementations for the following logging libraries:
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
## slog interoperability
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller.
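As a minimal end-to-end sketch of both directions, assuming the `slogr` package shipped with logr v1.3.0 and Go 1.21's `log/slog`:

    package main

    import (
        "log/slog"
        "os"

        "github.com/go-logr/logr"
        "github.com/go-logr/logr/slogr"
    )

    func main() {
        // slog.Handler -> logr.Logger: drive logr calls into a slog backend.
        handler := slog.NewJSONHandler(os.Stderr, nil)
        var logger logr.Logger = slogr.NewLogr(handler)
        logger.Info("converted handler", "direction", "slog-to-logr")

        // logr.Logger -> slog.Handler: drive slog calls into a logr backend.
        slogLogger := slog.New(slogr.NewSlogHandler(logger))
        slogLogger.Info("converted logger", "direction", "logr-to-slog")
    }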
## Using a `logr.Sink` as backend for slog
Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110).
If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.
Such an implementation should also support values that implement specific
interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
`slog.GroupValue`). logr does not convert those.
Not supporting slog has several drawbacks:
- Recording source code locations works correctly if the handler gets called
through `slog.Logger`, but may be wrong in other cases. That's because a
`logr.Sink` does its own stack unwinding instead of using the program counter
provided by the high-level API.
- slog levels <= 0 can be mapped to logr levels by negating the level without a
loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
because logr does not support "more important than info" levels.
- The slog group concept is supported by prefixing each key in a key/value
pair with the group names, separated by a dot. For structured output like
JSON it would be better to group the key/value pairs inside an object.
- Special slog values and interfaces don't work as expected.
- The overhead is likely to be higher.
These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.
## Using a `slog.Handler` as backend for logr
Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
attribute with `logger` as key and the names separated by slash as value.
- `Logger.Error` is turned into a log record with `slog.LevelError` as level
and an additional attribute with `err` as key, if an error was provided.
The main drawback is that `logr.Marshaler` will not be supported. Types should
ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.Valuer` is sufficient.
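A hedged sketch of a type that satisfies both interfaces; the `user` type and its fields are purely illustrative:

    // Assumes: import "log/slog"
    type user struct {
        Name  string
        Email string
    }

    // MarshalLog implements logr.Marshaler.
    func (u user) MarshalLog() any {
        return map[string]string{"name": u.Name} // deliberately omits Email
    }

    // LogValue implements slog.LogValuer.
    func (u user) LogValue() slog.Value {
        return slog.GroupValue(slog.String("name", u.Name))
    }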
## Context support for slog
Storing a logger in a `context.Context` is not supported by
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:
    func HandlerFromContext(ctx context.Context) slog.Handler {
        logger, err := logr.FromContext(ctx)
        if err == nil {
            return slogr.NewSlogHandler(logger)
        }
        return slog.Default().Handler()
    }

    func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
        return logr.NewContext(ctx, slogr.NewLogr(handler))
    }
The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.
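For code that standardizes on the `logr.Logger` API, the native helpers look like this (function names are illustrative):

    // Assumes: import ("context"; "github.com/go-logr/logr")
    func doWork(ctx context.Context) {
        // Returns the Logger stored by NewContext, or a Discard logger if none is present.
        logger := logr.FromContextOrDiscard(ctx)
        logger.V(1).Info("starting work")
    }

    func caller(ctx context.Context, logger logr.Logger) {
        doWork(logr.NewContext(ctx, logger.WithName("worker")))
    }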
## FAQ
### Conceptual
@@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this",
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs.)
info-type logs). For reference, slog pre-defines -4 for debug logs
(corresponds to 4 in logr), which matches what is
[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
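A brief sketch of that mapping in logr terms; the chosen levels are only examples:

    // logger is any logr.Logger.
    logger.V(0).Info("always shown when info logging is enabled")
    logger.V(1).Info("chattier info")
    logger.V(4).Info("debug detail") // roughly slog's LevelDebug (-4), per the mapping above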
#### How do I choose my keys?

vendor/github.com/go-logr/logr/SECURITY.md (new file)

@@ -0,0 +1,18 @@
# Security Policy
If you have discovered a security vulnerability in this project, please report it
privately. **Do not disclose it as a public issue.** This gives us time to work with you
to fix the issue before public exposure, reducing the chance that the exploit will be
used before a patch is released.
You may submit the report in the following ways:
- send an email to go-logr-security@googlegroups.com
- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
Please provide the following information in your report:
- A description of the vulnerability and its impact
- How to reproduce the issue
We ask that you give us 90 days to work on a fix before public exposure.


@@ -116,17 +116,17 @@ type Options struct {
// Equivalent hooks are offered for key-value pairs saved via
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
// for user-provided pairs (see RenderArgsHook).
RenderBuiltinsHook func(kvList []interface{}) []interface{}
RenderBuiltinsHook func(kvList []any) []any
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
// only called for key-value pairs saved via logr.Logger.WithValues. See
// RenderBuiltinsHook for more details.
RenderValuesHook func(kvList []interface{}) []interface{}
RenderValuesHook func(kvList []any) []any
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
// called for key-value pairs passed directly to Info and Error. See
// RenderBuiltinsHook for more details.
RenderArgsHook func(kvList []interface{}) []interface{}
RenderArgsHook func(kvList []any) []any
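As an illustrative sketch, a `RenderArgsHook` passed to `funcr.New` can post-process user-supplied key/value pairs before rendering; the redaction policy below is hypothetical:

    // Assumes: import ("fmt"; "github.com/go-logr/logr"; "github.com/go-logr/logr/funcr")
    func newRedactingLogger() logr.Logger {
        return funcr.New(
            func(prefix, args string) { fmt.Println(prefix, args) },
            funcr.Options{
                RenderArgsHook: func(kvList []any) []any {
                    // Blank out values logged under a "password" key.
                    for i := 0; i+1 < len(kvList); i += 2 {
                        if k, ok := kvList[i].(string); ok && k == "password" {
                            kvList[i+1] = "REDACTED"
                        }
                    }
                    return kvList
                },
            },
        )
    }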
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
// that contains a struct, etc.) it may log. Every time it finds a struct,
@@ -163,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink {
return &l
}
func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
@@ -173,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
return &l
}
func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
func (l fnlogger) Info(level int, msg string, kvList ...any) {
prefix, args := l.FormatInfo(level, msg, kvList)
l.write(prefix, args)
}
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
func (l fnlogger) Error(err error, msg string, kvList ...any) {
prefix, args := l.FormatError(err, msg, kvList)
l.write(prefix, args)
}
@@ -229,7 +229,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
type Formatter struct {
outputFormat outputFormat
prefix string
values []interface{}
values []any
valuesStr string
depth int
opts *Options
@@ -246,10 +246,10 @@ const (
)
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []interface{}
type PseudoStruct []any
// render produces a log line, ready to use.
func (f Formatter) render(builtins, args []interface{}) string {
func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
@@ -292,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string {
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
@@ -334,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b
return kvList
}
func (f Formatter) pretty(value interface{}) string {
func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0)
}
@@ -343,7 +343,7 @@ const (
)
// TODO: This is not fast. Most of the overhead goes here.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
if depth > f.opts.MaxLogDepth {
return `"<max-log-depth-exceeded>"`
}
@@ -614,7 +614,7 @@ func isEmpty(v reflect.Value) bool {
return false
}
func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
func invokeMarshaler(m logr.Marshaler) (ret any) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
@@ -675,12 +675,12 @@ func (f Formatter) caller() Caller {
const noValue = "<no-value>"
func (f Formatter) nonStringKey(v interface{}) string {
func (f Formatter) nonStringKey(v any) string {
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}
// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v interface{}) string {
func (f Formatter) snippet(v any) string {
const snipLen = 16
snip := f.pretty(v)
@@ -693,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string {
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
func (f Formatter) sanitize(kvList []any) []any {
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
@@ -727,8 +727,8 @@ func (f Formatter) GetDepth() int {
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
args := make([]any, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
@@ -747,8 +747,8 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref
// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
args := make([]any, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
@@ -761,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre
args = append(args, "caller", f.caller())
}
args = append(args, "msg", msg)
var loggableErr interface{}
var loggableErr any
if err != nil {
loggableErr = err.Error()
}
args = append(args, "error", loggableErr)
return f.prefix, f.render(args, kvList)
return prefix, f.render(args, kvList)
}
// AddName appends the specified name. funcr uses '/' characters to separate
@@ -781,7 +781,7 @@ func (f *Formatter) AddName(name string) {
// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []interface{}) {
func (f *Formatter) AddValues(kvList []any) {
// Three slice args forces a copy.
n := len(f.values)
f.values = append(f.values[:n:n], kvList...)


@@ -127,9 +127,9 @@ limitations under the License.
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
// Calling methods with the null logger (Logger{}) as instance will crash
// because it has no LogSink. Therefore this null logger should never be passed
// around. For cases where passing a logger is optional, a pointer to Logger
// The zero logger (= Logger{}) is identical to Discard() and discards all log
// entries. Code that receives a Logger by value can simply call it; the methods
// will never crash. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// # Key Naming Conventions
@@ -258,6 +258,12 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
// Some implementations of LogSink look at the caller in Enabled (e.g.
// different verbosity levels per package or file), but we only pass one
// CallDepth in (via Init). This means that all calls from Logger to the
// LogSink's Enabled, Info, and Error methods must have the same number of
// frames. In other words, Logger methods can't call other Logger methods
// which call these LogSink methods unless we do it the same in all paths.
return l.sink != nil && l.sink.Enabled(l.level)
}
@@ -267,11 +273,11 @@ func (l Logger) Enabled() bool {
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
func (l Logger) Info(msg string, keysAndValues ...any) {
if l.sink == nil {
return
}
if l.Enabled() {
if l.sink.Enabled(l.level) { // see comment in Enabled
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
func (l Logger) Error(err error, msg string, keysAndValues ...any) {
if l.sink == nil {
return
}
@@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger {
return l
}
// GetV returns the verbosity level of the logger. If the logger's LogSink is
// nil as in the Discard logger, this will always return 0.
func (l Logger) GetV() int {
// 0 if l.sink nil because of the if check in V above.
return l.level
}
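A small sketch of the behavior documented above: the zero Logger never panics, and GetV reports 0 whenever the sink is nil:

    // Assumes: import ("fmt"; "github.com/go-logr/logr")
    func zeroValueDemo() {
        var zero logr.Logger        // zero value, sink is nil
        zero.Info("dropped safely") // does not panic; behaves like logr.Discard()

        d := logr.Discard()
        fmt.Println(d.V(2).GetV()) // prints 0: with a nil sink, V and GetV short-circuit
    }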
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
func (l Logger) WithValues(keysAndValues ...any) Logger {
if l.sink == nil {
return l
}
@@ -467,15 +480,15 @@ type LogSink interface {
// The level argument is provided for optional logging. This method will
// only be called when Enabled(level) is true. See Logger.Info for more
// details.
Info(level int, msg string, keysAndValues ...interface{})
Info(level int, msg string, keysAndValues ...any)
// Error logs an error, with the given message and key/value pairs as
// context. See Logger.Error for more details.
Error(err error, msg string, keysAndValues ...interface{})
Error(err error, msg string, keysAndValues ...any)
// WithValues returns a new LogSink with additional key/value pairs. See
// Logger.WithValues for more details.
WithValues(keysAndValues ...interface{}) LogSink
WithValues(keysAndValues ...any) LogSink
// WithName returns a new LogSink with the specified name appended. See
// Logger.WithName for more details.
@@ -546,5 +559,5 @@ type Marshaler interface {
// with exported fields
//
// It may return any value of any type.
MarshalLog() interface{}
MarshalLog() any
}


@@ -5,7 +5,7 @@
// Package cmp determines equality of values.
//
// This package is intended to be a more powerful and safer alternative to
// reflect.DeepEqual for comparing whether two values are semantically equal.
// [reflect.DeepEqual] for comparing whether two values are semantically equal.
// It is intended to only be used in tests, as performance is not a goal and
// it may panic if it cannot compare the values. Its propensity towards
// panicking means that it's unsuitable for production environments where a
@@ -18,16 +18,17 @@
// For example, an equality function may report floats as equal so long as
// they are within some tolerance of each other.
//
// - Types with an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation
// for the types that they define.
// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
// to determine equality. This allows package authors to determine
// the equality operation for the types that they define.
//
// - If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on
// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
// unexported fields are not compared by default; they result in panics
// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
// or explicitly compared using the Exporter option.
// unless suppressed by using an [Ignore] option
// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
// or explicitly compared using the [Exporter] option.
package cmp
import (
@@ -45,14 +46,14 @@ import (
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// - Let S be the set of all Ignore, Transformer, and Comparer options that
// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is non-zero,
// If at least one [Ignore] exists in S, then the comparison is ignored.
// If the number of [Transformer] and [Comparer] options in S is non-zero,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform
// If S contains a single [Transformer], then use that to transform
// the current values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// If S contains a single [Comparer], then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
//
// - If the values have an Equal method of the form "(T) Equal(T) bool" or
@@ -66,21 +67,22 @@ import (
// Functions are only equal if they are both nil, otherwise they are unequal.
//
// Structs are equal if recursively calling Equal on all fields report equal.
// If a struct contains unexported fields, Equal panics unless an Ignore option
// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option
// explicitly permits comparing the unexported field.
// If a struct contains unexported fields, Equal panics unless an [Ignore] option
// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
// or the [Exporter] option explicitly permits comparing the unexported field.
//
// Slices are equal if they are both nil or both non-nil, where recursively
// calling Equal on all non-ignored slice or array elements report equal.
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
// consider using cmpopts.EquateEmpty.
// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
//
// Maps are equal if they are both nil or both non-nil, where recursively
// calling Equal on all non-ignored map entries report equal.
// Map keys are equal according to the == operator.
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
// To use custom comparisons for map keys, consider using
// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
// consider using cmpopts.EquateEmpty.
// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
//
// Pointers and interfaces are equal if they are both nil or both non-nil,
// where they have the same underlying concrete type and recursively
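To make the slice and map rules above concrete, a hedged usage sketch (the `Config` type is illustrative only):

    // Assumes: import ("fmt"; "github.com/google/go-cmp/cmp"; "github.com/google/go-cmp/cmp/cmpopts")
    type Config struct {
        Name string
        Tags []string
    }

    func equateEmptyDemo() {
        x := Config{Name: "a", Tags: nil}
        y := Config{Name: "a", Tags: []string{}}

        fmt.Println(cmp.Equal(x, y))                        // false: nil vs. empty non-nil slice
        fmt.Println(cmp.Equal(x, y, cmpopts.EquateEmpty())) // true: empty and nil are equated
        fmt.Println(cmp.Diff(x, y))                         // human-readable report of the difference
    }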


@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
package cmp
import (
@@ -12,8 +9,6 @@ import (
"unsafe"
)
const supportExporters = true
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
// a struct such that the value has read-write permissions.
//


@@ -1,16 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego
// +build purego
package cmp
import "reflect"
const supportExporters = false
func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
panic("no support for forcibly accessing unexported fields")
}


@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
package value
import (


@@ -1,34 +0,0 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego
// +build purego
package value
import "reflect"
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
type Pointer struct {
p uintptr
t reflect.Type
}
// PointerOf returns a Pointer from v, which must be a
// reflect.Ptr, reflect.Slice, or reflect.Map.
func PointerOf(v reflect.Value) Pointer {
// NOTE: Storing a pointer as an uintptr is technically incorrect as it
// assumes that the GC implementation does not use a moving collector.
return Pointer{v.Pointer(), v.Type()}
}
// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
return p.p == 0
}
// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
return p.p
}


@@ -13,15 +13,15 @@ import (
"github.com/google/go-cmp/cmp/internal/function"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// Option configures for specific behavior of [Equal] and [Diff]. In particular,
// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
// The fundamental options may be composed with filters ([FilterPath] and
// [FilterValues]) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
// for creating options that may be used with [Equal] and [Diff].
type Option interface {
// filter applies all filters and returns the option that remains.
// Each option may only read s.curPath and call s.callTTBFunc.
@@ -56,9 +56,9 @@ type core struct{}
func (core) isCore() {}
// Options is a list of Option values that also satisfies the Option interface.
// Options is a list of [Option] values that also satisfies the [Option] interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// [Option] values into a single [Option]. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
@@ -105,16 +105,16 @@ func (opts Options) String() string {
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
}
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
// FilterPath returns a new [Option] where opt is only evaluated if filter f
// returns true for the current [Path] in the value tree.
//
// This filter is called even if a slice element or map entry is missing and
// provides an opportunity to ignore such cases. The filter function must be
// symmetric such that the filter result is identical regardless of whether the
// missing value is from x or y.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
// a previously filtered [Option].
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
@@ -142,7 +142,7 @@ func (f pathFilter) String() string {
return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
}
// FilterValues returns a new Option where opt is only evaluated if filter f,
// FilterValues returns a new [Option] where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If either value is invalid or
// the type of the values is not assignable to T, then this filter implicitly
@@ -154,8 +154,8 @@ func (f pathFilter) String() string {
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
// a previously filtered [Option].
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
@@ -192,9 +192,9 @@ func (f valuesFilter) String() string {
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
}
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
// Ignore is an [Option] that causes all comparisons to be ignored.
// This value is intended to be combined with [FilterPath] or [FilterValues].
// It is an error to pass an unfiltered Ignore option to [Equal].
func Ignore() Option { return ignore{} }
type ignore struct{ core }
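An illustrative sketch of combining a filter with Ignore; the `Event` type is hypothetical:

    // Assumes: import ("time"; "github.com/google/go-cmp/cmp")
    type Event struct {
        ID        string
        Timestamp time.Time
    }

    func ignoreTimestampDemo() bool {
        // Apply Ignore only when the current path ends at the Timestamp field.
        ignoreTS := cmp.FilterPath(func(p cmp.Path) bool {
            sf, ok := p.Last().(cmp.StructField)
            return ok && sf.Name() == "Timestamp"
        }, cmp.Ignore())

        return cmp.Equal(
            Event{ID: "x"},
            Event{ID: "x", Timestamp: time.Now()},
            ignoreTS,
        ) // true: the Timestamp comparison is ignored
    }

The cmpopts.IgnoreFields helper expresses the same intent more compactly.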
@@ -234,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values"
} else if t.Comparable() {
help = "consider using cmpopts.EquateComparable to compare comparable Go types"
}
} else {
// Unnamed type with unexported fields. Derive PkgPath from field.
@@ -254,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*`
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
// Transformer returns an Option that applies a transformation function that
// Transformer returns an [Option] that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
@@ -265,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
// same transform to the output of itself (e.g., in the case where the
// input and output types are the same), an implicit filter is added such that
// a transformer is applicable only if that exact transformer is not already
// in the tail of the Path since the last non-Transform step.
// in the tail of the [Path] since the last non-[Transform] step.
// For situations where the implicit filter is still insufficient,
// consider using cmpopts.AcyclicTransformer, which adds a filter
// to prevent the transformer from being recursively applied upon itself.
// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
// which adds a filter to prevent the transformer from
// being recursively applied upon itself.
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep (and eventually shown in the Diff output).
// The name is a user provided label that is used as the [Transform.Name] in the
// transformation [PathStep] (and eventually shown in the [Diff] output).
// The name must be a valid identifier or qualified identifier in Go syntax.
// If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
@@ -329,7 +332,7 @@ func (tr transformer) String() string {
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
}
// Comparer returns an Option that determines whether two values are equal
// Comparer returns an [Option] that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
@@ -377,35 +380,32 @@ func (cm comparer) String() string {
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
}
// Exporter returns an Option that specifies whether Equal is allowed to
// Exporter returns an [Option] that specifies whether [Equal] is allowed to
// introspect into the unexported fields of certain struct types.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// implementation of some external package may cause the result of [Equal]
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// In many cases, a custom Comparer should be used instead that defines
// In many cases, a custom [Comparer] should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// For example, the [reflect.Type] documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// comparison) and most attempts to compare *[regexp.Regexp] types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
// Both of these are accomplished using [Comparer] options:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
// option can be used to ignore all unexported fields on specified struct types.
func Exporter(f func(reflect.Type) bool) Option {
if !supportExporters {
panic("Exporter is not supported on purego builds")
}
return exporter(f)
}
@@ -415,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO
panic("not implemented")
}
// AllowUnexported returns an Options that allows Equal to forcibly introspect
// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
// unexported fields of the specified struct types.
//
// See Exporter for the proper use of this option.
// See [Exporter] for the proper use of this option.
func AllowUnexported(types ...interface{}) Option {
m := make(map[reflect.Type]bool)
for _, typ := range types {
@@ -432,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option {
}
// Result represents the comparison result for a single node and
// is provided by cmp when calling Report (see Reporter).
// is provided by cmp when calling Report (see [Reporter]).
type Result struct {
_ [0]func() // Make Result incomparable
flags resultFlags
@@ -445,7 +445,7 @@ func (r Result) Equal() bool {
}
// ByIgnore reports whether the node is equal because it was ignored.
// This never reports true if Equal reports false.
// This never reports true if [Result.Equal] reports false.
func (r Result) ByIgnore() bool {
return r.flags&reportByIgnore != 0
}
@@ -455,7 +455,7 @@ func (r Result) ByMethod() bool {
return r.flags&reportByMethod != 0
}
// ByFunc reports whether a Comparer function determined equality.
// ByFunc reports whether a [Comparer] function determined equality.
func (r Result) ByFunc() bool {
return r.flags&reportByFunc != 0
}
@@ -478,7 +478,7 @@ const (
reportByCycle
)
// Reporter is an Option that can be passed to Equal. When Equal traverses
// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
// the value trees, it calls PushStep as it descends into each node in the
// tree and PopStep as it ascend out of the node. The leaves of the tree are
// either compared (determined to be equal or not equal) or ignored and reported


@@ -14,9 +14,9 @@ import (
"github.com/google/go-cmp/cmp/internal/value"
)
// Path is a list of PathSteps describing the sequence of operations to get
// Path is a list of [PathStep] describing the sequence of operations to get
// from some root type to the current position in the value tree.
// The first Path element is always an operation-less PathStep that exists
// The first Path element is always an operation-less [PathStep] that exists
// simply to identify the initial type.
//
// When traversing structs with embedded structs, the embedded struct will
@@ -29,8 +29,13 @@ type Path []PathStep
// a value's tree structure. Users of this package never need to implement
// these types as values of this type will be returned by this package.
//
// Implementations of this interface are
// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
// Implementations of this interface:
// - [StructField]
// - [SliceIndex]
// - [MapIndex]
// - [Indirect]
// - [TypeAssertion]
// - [Transform]
type PathStep interface {
String() string
@@ -70,8 +75,9 @@ func (pa *Path) pop() {
*pa = (*pa)[:len(*pa)-1]
}
// Last returns the last PathStep in the Path.
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
// Last returns the last [PathStep] in the Path.
// If the path is empty, this returns a non-nil [PathStep]
// that reports a nil [PathStep.Type].
func (pa Path) Last() PathStep {
return pa.Index(-1)
}
@@ -79,7 +85,8 @@ func (pa Path) Last() PathStep {
// Index returns the ith step in the Path and supports negative indexing.
// A negative index starts counting from the tail of the Path such that -1
// refers to the last step, -2 refers to the second-to-last step, and so on.
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
// If index is invalid, this returns a non-nil [PathStep]
// that reports a nil [PathStep.Type].
func (pa Path) Index(i int) PathStep {
if i < 0 {
i = len(pa) + i
@@ -168,7 +175,8 @@ func (ps pathStep) String() string {
return fmt.Sprintf("{%s}", s)
}
// StructField represents a struct field access on a field called Name.
// StructField is a [PathStep] that represents a struct field access
// on a field called [StructField.Name].
type StructField struct{ *structField }
type structField struct {
pathStep
@@ -204,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
func (sf StructField) Name() string { return sf.name }
// Index is the index of the field in the parent struct type.
// See reflect.Type.Field.
// See [reflect.Type.Field].
func (sf StructField) Index() int { return sf.idx }
// SliceIndex is an index operation on a slice or array at some index Key.
// SliceIndex is a [PathStep] that represents an index operation on
// a slice or array at some index [SliceIndex.Key].
type SliceIndex struct{ *sliceIndex }
type sliceIndex struct {
pathStep
@@ -247,12 +256,12 @@ func (si SliceIndex) Key() int {
// all of the indexes to be shifted. If an index is -1, then that
// indicates that the element does not exist in the associated slice.
//
// Key is guaranteed to return -1 if and only if the indexes returned
// by SplitKeys are not the same. SplitKeys will never return -1 for
// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
// returned by SplitKeys are not the same. SplitKeys will never return -1 for
// both indexes.
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
// MapIndex is an index operation on a map at some index Key.
// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
type MapIndex struct{ *mapIndex }
type mapIndex struct {
pathStep
@@ -266,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]",
// Key is the value of the map key.
func (mi MapIndex) Key() reflect.Value { return mi.key }
// Indirect represents pointer indirection on the parent type.
// Indirect is a [PathStep] that represents pointer indirection on the parent type.
type Indirect struct{ *indirect }
type indirect struct {
pathStep
@@ -276,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ }
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
func (in Indirect) String() string { return "*" }
// TypeAssertion represents a type assertion on an interface.
// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
type TypeAssertion struct{ *typeAssertion }
type typeAssertion struct {
pathStep
@@ -286,7 +295,8 @@ func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
// Transform is a transformation from the parent type to the current type.
// Transform is a [PathStep] that represents a transformation
// from the parent type to the current type.
type Transform struct{ *transform }
type transform struct {
pathStep
@@ -297,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ }
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
// Name is the name of the Transformer.
// Name is the name of the [Transformer].
func (tf Transform) Name() string { return tf.trans.name }
// Func is the function pointer to the transformer function.
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
// Option returns the originally constructed Transformer option.
// Option returns the originally constructed [Transformer] option.
// The == operator can be used to detect the exact option used.
func (tf Transform) Option() Option { return tf.trans }
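The PathStep types above are what a custom Reporter walks; a minimal sketch, adapted from the reporter pattern in the package's examples:

    // Assumes: import ("fmt"; "github.com/google/go-cmp/cmp")
    // diffReporter records one line per unequal leaf comparison.
    type diffReporter struct {
        path  cmp.Path
        diffs []string
    }

    func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

    func (r *diffReporter) Report(rs cmp.Result) {
        if !rs.Equal() {
            vx, vy := r.path.Last().Values()
            r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v", r.path, vx, vy))
        }
    }

    func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

    // Usage: var r diffReporter; cmp.Equal(x, y, cmp.Reporter(&r)); then inspect r.diffs.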


@@ -199,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
break
}
sf := t.Field(i)
if supportExporters && !isExported(sf.Name) {
if !isExported(sf.Name) {
vv = retrieveUnexportedField(v, sf, true)
}
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)


@@ -20,8 +20,8 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"strings"
"time"
@@ -56,6 +56,7 @@ func SetMaxGRPCLogLength(characterCount int) {
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
//
// The function tries to connect for 30 seconds, and returns an error if no connection has been established at that point.
// The connection has zero idle timeout, i.e. it is never closed because of inactivity.
// The function automatically disables TLS and adds interceptor for logging of all gRPC messages at level 5.
// If the metricsManager is 'nil', no metrics will be recorded on the gRPC calls.
// The function behaviour can be tweaked with options.
@@ -107,7 +108,7 @@ func OnConnectionLoss(reconnect func() bool) Option {
func ExitOnConnectionLoss() func() bool {
return func() bool {
terminationMsg := "Lost connection to CSI driver, exiting"
if err := ioutil.WriteFile(terminationLogPath, []byte(terminationMsg), 0644); err != nil {
if err := os.WriteFile(terminationLogPath, []byte(terminationMsg), 0644); err != nil {
klog.Errorf("%s: %s", terminationLogPath, err)
}
klog.Exit(terminationMsg)
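A hedged usage sketch of the connection-loss option; the exact `Connect` signature is an assumption about csi-lib-utils v0.16.0 and should be checked against the release:

    // Assumes: import ("github.com/kubernetes-csi/csi-lib-utils/connection";
    //                   "github.com/kubernetes-csi/csi-lib-utils/metrics";
    //                   "google.golang.org/grpc"; "k8s.io/klog/v2")
    func connectToDriver(metricsManager metrics.CSIMetricsManager) *grpc.ClientConn {
        // Assumed signature: Connect(address, metricsManager, options...).
        conn, err := connection.Connect(
            "unix:///csi/csi.sock",
            metricsManager, // nil disables metric recording, per the doc comment above
            connection.OnConnectionLoss(connection.ExitOnConnectionLoss()),
        )
        if err != nil {
            klog.Fatalf("failed to connect to CSI driver: %v", err)
        }
        return conn
    }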
@@ -157,6 +158,7 @@ func connect(
grpc.WithInsecure(), // Don't use TLS, it's usually local Unix domain socket in a container.
grpc.WithBackoffMaxDelay(time.Second), // Retry every second after failure.
grpc.WithBlock(), // Block until connection succeeds.
grpc.WithIdleTimeout(time.Duration(0)), // Never close connection because of inactivity.
}
if o.timeout > 0 {

View File

@@ -19,7 +19,6 @@ package leaderelection
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
@@ -219,7 +218,7 @@ func inClusterNamespace() string {
return ns
}
if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
return ns
}

View File

@@ -18,15 +18,14 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/global"
"go.opentelemetry.io/otel/propagation"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
const (
// instrumentationName is the name of this instrumentation package.
instrumentationName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// ScopeName is the instrumentation scope name.
ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// GRPCStatusCodeKey is convention for numeric status code of a gRPC request.
GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
)
@@ -42,9 +41,19 @@ type config struct {
Propagators propagation.TextMapPropagator
TracerProvider trace.TracerProvider
MeterProvider metric.MeterProvider
SpanStartOptions []trace.SpanStartOption
ReceivedEvent bool
SentEvent bool
tracer trace.Tracer
meter metric.Meter
rpcServerDuration metric.Int64Histogram
rpcDuration metric.Float64Histogram
rpcRequestSize metric.Int64Histogram
rpcResponseSize metric.Int64Histogram
rpcRequestsPerRPC metric.Int64Histogram
rpcResponsesPerRPC metric.Int64Histogram
}
// Option applies an option value for a config.
@@ -53,23 +62,60 @@ type Option interface {
}
// newConfig returns a config configured with all the passed Options.
func newConfig(opts []Option) *config {
func newConfig(opts []Option, role string) *config {
c := &config{
Propagators: otel.GetTextMapPropagator(),
TracerProvider: otel.GetTracerProvider(),
MeterProvider: global.MeterProvider(),
MeterProvider: otel.GetMeterProvider(),
}
for _, o := range opts {
o.apply(c)
}
c.tracer = c.TracerProvider.Tracer(
ScopeName,
trace.WithInstrumentationVersion(SemVersion()),
)
c.meter = c.MeterProvider.Meter(
instrumentationName,
metric.WithInstrumentationVersion(SemVersion()),
ScopeName,
metric.WithInstrumentationVersion(Version()),
metric.WithSchemaURL(semconv.SchemaURL),
)
var err error
if c.rpcServerDuration, err = c.meter.Int64Histogram("rpc.server.duration", metric.WithUnit("ms")); err != nil {
c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration",
metric.WithDescription("Measures the duration of inbound RPC."),
metric.WithUnit("ms"))
if err != nil {
otel.Handle(err)
}
c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size",
metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
metric.WithUnit("By"))
if err != nil {
otel.Handle(err)
}
c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size",
metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
metric.WithUnit("By"))
if err != nil {
otel.Handle(err)
}
c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
metric.WithUnit("{count}"))
if err != nil {
otel.Handle(err)
}
c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
metric.WithUnit("{count}"))
if err != nil {
otel.Handle(err)
}
@@ -99,6 +145,8 @@ func (o tracerProviderOption) apply(c *config) {
}
// WithInterceptorFilter returns an Option to use the request filter.
//
// Deprecated: Use stats handlers instead.
func WithInterceptorFilter(f Filter) Option {
return interceptorFilterOption{f: f}
}
@@ -132,3 +180,50 @@ func (o meterProviderOption) apply(c *config) {
func WithMeterProvider(mp metric.MeterProvider) Option {
return meterProviderOption{mp: mp}
}
// Event type that can be recorded, see WithMessageEvents.
type Event int
// Different types of events that can be recorded, see WithMessageEvents.
const (
ReceivedEvents Event = iota
SentEvents
)
type messageEventsProviderOption struct {
events []Event
}
func (m messageEventsProviderOption) apply(c *config) {
for _, e := range m.events {
switch e {
case ReceivedEvents:
c.ReceivedEvent = true
case SentEvents:
c.SentEvent = true
}
}
}
// WithMessageEvents configures the Handler to record the specified events
// (span.AddEvent) on spans. By default only summary attributes are added at the
// end of the request.
//
// Valid events are:
// - ReceivedEvents: Record the number of bytes read after every gRPC read operation.
// - SentEvents: Record the number of bytes written after every gRPC write operation.
func WithMessageEvents(events ...Event) Option {
return messageEventsProviderOption{events: events}
}
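A hedged usage sketch of the option defined above: it enables both event types on the client stats handler added elsewhere in this change. The dial target is a placeholder; the handler and option names come from this package.

```go
package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// newEventedClientConn dials a placeholder target with a client stats
// handler that records a span event for every message received and sent.
func newEventedClientConn() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler(
			otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents),
		)),
	)
}
```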
type spanStartOption struct{ opts []trace.SpanStartOption }
func (o spanStartOption) apply(c *config) {
c.SpanStartOptions = append(c.SpanStartOptions, o.opts...)
}
// WithSpanOptions configures an additional set of
// trace.SpanOptions, which are applied to each new span.
func WithSpanOptions(opts ...trace.SpanStartOption) Option {
return spanStartOption{opts}
}

View File

@@ -0,0 +1,22 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client.
Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server.
*/
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
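A minimal server-side sketch of the wiring this package comment describes, using only the standard gRPC server API; the listen address is a placeholder, and a real service would register its implementations before serving:

```go
package main

import (
	"net"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

// serve installs the server stats handler so every incoming RPC is traced
// and measured, then serves on a placeholder address.
func serve() error {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		return err
	}
	srv := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))
	// Service registrations would go here.
	return srv.Serve(lis)
}
```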

View File

@@ -60,11 +60,13 @@ var (
// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
// for use in a grpc.Dial call.
//
// Deprecated: Use [NewClientHandler] instead.
func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
cfg := newConfig(opts)
cfg := newConfig(opts, "client")
tracer := cfg.TracerProvider.Tracer(
instrumentationName,
trace.WithInstrumentationVersion(SemVersion()),
ScopeName,
trace.WithInstrumentationVersion(Version()),
)
return func(
@@ -83,23 +85,33 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
return invoker(ctx, method, req, reply, cc, callOpts...)
}
name, attr := spanInfo(method, cc.Target())
var span trace.Span
ctx, span = tracer.Start(
ctx,
name,
name, attr, _ := telemetryAttributes(method, cc.Target())
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindClient),
trace.WithAttributes(attr...),
},
cfg.SpanStartOptions...,
)
ctx, span := tracer.Start(
ctx,
name,
startOpts...,
)
defer span.End()
ctx = inject(ctx, cfg.Propagators)
if cfg.SentEvent {
messageSent.Event(ctx, 1, req)
}
err := invoker(ctx, method, req, reply, cc, callOpts...)
if cfg.ReceivedEvent {
messageReceived.Event(ctx, 1, reply)
}
if err != nil {
s, _ := status.FromError(err)
@@ -135,6 +147,9 @@ type clientStream struct {
eventsDone chan struct{}
finished chan error
receivedEvent bool
sentEvent bool
receivedMessageID int
sentMessageID int
}
@@ -152,8 +167,11 @@ func (w *clientStream) RecvMsg(m interface{}) error {
w.sendStreamEvent(errorEvent, err)
} else {
w.receivedMessageID++
if w.receivedEvent {
messageReceived.Event(w.Context(), w.receivedMessageID, m)
}
}
return err
}
@@ -162,7 +180,10 @@ func (w *clientStream) SendMsg(m interface{}) error {
err := w.ClientStream.SendMsg(m)
w.sentMessageID++
if w.sentEvent {
messageSent.Event(w.Context(), w.sentMessageID, m)
}
if err != nil {
w.sendStreamEvent(errorEvent, err)
@@ -173,7 +194,6 @@ func (w *clientStream) SendMsg(m interface{}) error {
func (w *clientStream) Header() (metadata.MD, error) {
md, err := w.ClientStream.Header()
if err != nil {
w.sendStreamEvent(errorEvent, err)
}
@@ -183,7 +203,6 @@ func (w *clientStream) Header() (metadata.MD, error) {
func (w *clientStream) CloseSend() error {
err := w.ClientStream.CloseSend()
if err != nil {
w.sendStreamEvent(errorEvent, err)
}
@@ -191,7 +210,7 @@ func (w *clientStream) CloseSend() error {
return err
}
func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc) *clientStream {
func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, cfg *config) *clientStream {
events := make(chan streamEvent)
eventsDone := make(chan struct{})
finished := make(chan error)
@@ -223,6 +242,8 @@ func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.Strea
events: events,
eventsDone: eventsDone,
finished: finished,
receivedEvent: cfg.ReceivedEvent,
sentEvent: cfg.SentEvent,
}
}
@@ -235,11 +256,13 @@ func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) {
// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
// for use in a grpc.Dial call.
//
// Deprecated: Use [NewClientHandler] instead.
func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
cfg := newConfig(opts)
cfg := newConfig(opts, "client")
tracer := cfg.TracerProvider.Tracer(
instrumentationName,
trace.WithInstrumentationVersion(SemVersion()),
ScopeName,
trace.WithInstrumentationVersion(Version()),
)
return func(
@@ -258,13 +281,19 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
return streamer(ctx, desc, cc, method, callOpts...)
}
name, attr := spanInfo(method, cc.Target())
var span trace.Span
ctx, span = tracer.Start(
ctx,
name,
name, attr, _ := telemetryAttributes(method, cc.Target())
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindClient),
trace.WithAttributes(attr...),
},
cfg.SpanStartOptions...,
)
ctx, span := tracer.Start(
ctx,
name,
startOpts...,
)
ctx = inject(ctx, cfg.Propagators)
@@ -277,7 +306,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
span.End()
return s, err
}
stream := wrapClientStream(ctx, s, desc)
stream := wrapClientStream(ctx, s, desc, cfg)
go func() {
err := <-stream.finished
@@ -299,11 +328,13 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
// for use in a grpc.NewServer call.
//
// Deprecated: Use [NewServerHandler] instead.
func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
cfg := newConfig(opts)
cfg := newConfig(opts, "server")
tracer := cfg.TracerProvider.Tracer(
instrumentationName,
trace.WithInstrumentationVersion(SemVersion()),
ScopeName,
trace.WithInstrumentationVersion(Version()),
)
return func(
@@ -321,38 +352,48 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
}
ctx = extract(ctx, cfg.Propagators)
name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attr...),
},
cfg.SpanStartOptions...,
)
name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
ctx, span := tracer.Start(
trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
name,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attr...),
startOpts...,
)
defer span.End()
if cfg.ReceivedEvent {
messageReceived.Event(ctx, 1, req)
}
var statusCode grpc_codes.Code
defer func(t time.Time) {
elapsedTime := time.Since(t) / time.Millisecond
attr = append(attr, semconv.RPCGRPCStatusCodeKey.Int64(int64(statusCode)))
o := metric.WithAttributes(attr...)
cfg.rpcServerDuration.Record(ctx, int64(elapsedTime), o)
}(time.Now())
before := time.Now()
resp, err := handler(ctx, req)
if err != nil {
s, _ := status.FromError(err)
if err != nil {
statusCode, msg := serverStatus(s)
span.SetStatus(statusCode, msg)
span.SetAttributes(statusCodeAttr(s.Code()))
if cfg.SentEvent {
messageSent.Event(ctx, 1, s.Proto())
}
} else {
statusCode = grpc_codes.OK
span.SetAttributes(statusCodeAttr(grpc_codes.OK))
if cfg.SentEvent {
messageSent.Event(ctx, 1, resp)
}
}
grpcStatusCodeAttr := statusCodeAttr(s.Code())
span.SetAttributes(grpcStatusCodeAttr)
elapsedTime := time.Since(before).Milliseconds()
metricAttrs = append(metricAttrs, grpcStatusCodeAttr)
cfg.rpcDuration.Record(ctx, float64(elapsedTime), metric.WithAttributes(metricAttrs...))
return resp, err
}
@@ -366,6 +407,9 @@ type serverStream struct {
receivedMessageID int
sentMessageID int
receivedEvent bool
sentEvent bool
}
func (w *serverStream) Context() context.Context {
@@ -377,8 +421,10 @@ func (w *serverStream) RecvMsg(m interface{}) error {
if err == nil {
w.receivedMessageID++
if w.receivedEvent {
messageReceived.Event(w.Context(), w.receivedMessageID, m)
}
}
return err
}
@@ -387,25 +433,31 @@ func (w *serverStream) SendMsg(m interface{}) error {
err := w.ServerStream.SendMsg(m)
w.sentMessageID++
if w.sentEvent {
messageSent.Event(w.Context(), w.sentMessageID, m)
}
return err
}
func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream {
func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream {
return &serverStream{
ServerStream: ss,
ctx: ctx,
receivedEvent: cfg.ReceivedEvent,
sentEvent: cfg.SentEvent,
}
}
// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
// for use in a grpc.NewServer call.
//
// Deprecated: Use [NewServerHandler] instead.
func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
cfg := newConfig(opts)
cfg := newConfig(opts, "server")
tracer := cfg.TracerProvider.Tracer(
instrumentationName,
trace.WithInstrumentationVersion(SemVersion()),
ScopeName,
trace.WithInstrumentationVersion(Version()),
)
return func(
@@ -420,21 +472,27 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
Type: StreamServer,
}
if cfg.Filter != nil && !cfg.Filter(i) {
return handler(srv, wrapServerStream(ctx, ss))
return handler(srv, wrapServerStream(ctx, ss, cfg))
}
ctx = extract(ctx, cfg.Propagators)
name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attr...),
},
cfg.SpanStartOptions...,
)
name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
ctx, span := tracer.Start(
trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
name,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attr...),
startOpts...,
)
defer span.End()
err := handler(srv, wrapServerStream(ctx, ss))
err := handler(srv, wrapServerStream(ctx, ss, cfg))
if err != nil {
s, _ := status.FromError(err)
statusCode, msg := serverStatus(s)
@@ -448,21 +506,25 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
}
}
// spanInfo returns a span name and all appropriate attributes from the gRPC
// method and peer address.
func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) {
attrs := []attribute.KeyValue{RPCSystemGRPC}
name, mAttrs := internal.ParseFullMethod(fullMethod)
attrs = append(attrs, mAttrs...)
attrs = append(attrs, peerAttr(peerAddress)...)
return name, attrs
// telemetryAttributes returns a span name and span and metric attributes from
// the gRPC method and peer address.
func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) {
name, methodAttrs := internal.ParseFullMethod(fullMethod)
peerAttrs := peerAttr(peerAddress)
attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs))
attrs = append(attrs, RPCSystemGRPC)
attrs = append(attrs, methodAttrs...)
metricAttrs := attrs[:1+len(methodAttrs)]
attrs = append(attrs, peerAttrs...)
return name, attrs, metricAttrs
}
// peerAttr returns attributes about the peer address.
func peerAttr(addr string) []attribute.KeyValue {
host, p, err := net.SplitHostPort(addr)
if err != nil {
return []attribute.KeyValue(nil)
return nil
}
if host == "" {
@@ -470,7 +532,7 @@ func peerAttr(addr string) []attribute.KeyValue {
}
port, err := strconv.Atoi(p)
if err != nil {
return []attribute.KeyValue(nil)
return nil
}
var attr []attribute.KeyValue

View File

@@ -24,19 +24,27 @@ import (
// ParseFullMethod returns a span name following the OpenTelemetry semantic
// conventions as well as all applicable span attribute.KeyValue attributes based
// on a gRPC's FullMethod.
//
// Parsing is consistent with grpc-go implementation:
// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39
func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) {
name := strings.TrimLeft(fullMethod, "/")
parts := strings.SplitN(name, "/", 2)
if len(parts) != 2 {
if !strings.HasPrefix(fullMethod, "/") {
// Invalid format, does not follow `/package.service/method`.
return name, []attribute.KeyValue(nil)
return fullMethod, nil
}
name := fullMethod[1:]
pos := strings.LastIndex(name, "/")
if pos < 0 {
// Invalid format, does not follow `/package.service/method`.
return name, nil
}
service, method := name[:pos], name[pos+1:]
var attrs []attribute.KeyValue
if service := parts[0]; service != "" {
if service != "" {
attrs = append(attrs, semconv.RPCService(service))
}
if method := parts[1]; method != "" {
if method != "" {
attrs = append(attrs, semconv.RPCMethod(method))
}
return name, attrs

View File

@@ -56,7 +56,7 @@ func (s *metadataSupplier) Keys() []string {
// requests.
// Deprecated: Unnecessary public func.
func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
c := newConfig(opts)
c := newConfig(opts, "")
c.Propagators.Inject(ctx, &metadataSupplier{
metadata: md,
})
@@ -78,7 +78,7 @@ func inject(ctx context.Context, propagators propagation.TextMapPropagator) cont
// This function is meant to be used on incoming requests.
// Deprecated: Unnecessary public func.
func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
c := newConfig(opts)
c := newConfig(opts, "")
ctx = c.Propagators.Extract(ctx, &metadataSupplier{
metadata: md,
})

View File

@@ -0,0 +1,235 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
import (
"context"
"sync/atomic"
"time"
grpc_codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
type gRPCContextKey struct{}
type gRPCContext struct {
messagesReceived int64
messagesSent int64
metricAttrs []attribute.KeyValue
}
type serverHandler struct {
*config
}
// NewServerHandler creates a stats.Handler for gRPC server.
func NewServerHandler(opts ...Option) stats.Handler {
h := &serverHandler{
config: newConfig(opts, "server"),
}
return h
}
// TagConn can attach some information to the given context.
func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
span := trace.SpanFromContext(ctx)
attrs := peerAttr(peerFromCtx(ctx))
span.SetAttributes(attrs...)
return ctx
}
// HandleConn processes the Conn stats.
func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
}
// TagRPC can attach some information to the given context.
func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
ctx = extract(ctx, h.config.Propagators)
name, attrs := internal.ParseFullMethod(info.FullMethodName)
attrs = append(attrs, RPCSystemGRPC)
ctx, _ = h.tracer.Start(
trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
name,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attrs...),
)
gctx := gRPCContext{
metricAttrs: attrs,
}
return context.WithValue(ctx, gRPCContextKey{}, &gctx)
}
// HandleRPC processes the RPC stats.
func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
h.handleRPC(ctx, rs)
}
type clientHandler struct {
*config
}
// NewClientHandler creates a stats.Handler for gRPC client.
func NewClientHandler(opts ...Option) stats.Handler {
h := &clientHandler{
config: newConfig(opts, "client"),
}
return h
}
// TagRPC can attach some information to the given context.
func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
name, attrs := internal.ParseFullMethod(info.FullMethodName)
attrs = append(attrs, RPCSystemGRPC)
ctx, _ = h.tracer.Start(
ctx,
name,
trace.WithSpanKind(trace.SpanKindClient),
trace.WithAttributes(attrs...),
)
gctx := gRPCContext{
metricAttrs: attrs,
}
return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
}
// HandleRPC processes the RPC stats.
func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
h.handleRPC(ctx, rs)
}
// TagConn can attach some information to the given context.
func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
span := trace.SpanFromContext(ctx)
attrs := peerAttr(cti.RemoteAddr.String())
span.SetAttributes(attrs...)
return ctx
}
// HandleConn processes the Conn stats.
func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
// no-op
}
func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) {
span := trace.SpanFromContext(ctx)
gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
var messageId int64
metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
metricAttrs = append(metricAttrs, gctx.metricAttrs...)
wctx := withoutCancel(ctx)
switch rs := rs.(type) {
case *stats.Begin:
case *stats.InPayload:
if gctx != nil {
messageId = atomic.AddInt64(&gctx.messagesReceived, 1)
c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
}
if c.ReceivedEvent {
span.AddEvent("message",
trace.WithAttributes(
semconv.MessageTypeReceived,
semconv.MessageIDKey.Int64(messageId),
semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
semconv.MessageUncompressedSizeKey.Int(rs.Length),
),
)
}
case *stats.OutPayload:
if gctx != nil {
messageId = atomic.AddInt64(&gctx.messagesSent, 1)
c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
}
if c.SentEvent {
span.AddEvent("message",
trace.WithAttributes(
semconv.MessageTypeSent,
semconv.MessageIDKey.Int64(messageId),
semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
semconv.MessageUncompressedSizeKey.Int(rs.Length),
),
)
}
case *stats.OutTrailer:
case *stats.End:
var rpcStatusAttr attribute.KeyValue
if rs.Error != nil {
s, _ := status.FromError(rs.Error)
span.SetStatus(codes.Error, s.Message())
rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
} else {
rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK))
}
span.SetAttributes(rpcStatusAttr)
span.End()
metricAttrs = append(metricAttrs, rpcStatusAttr)
c.rpcDuration.Record(wctx, float64(rs.EndTime.Sub(rs.BeginTime)), metric.WithAttributes(metricAttrs...))
c.rpcRequestsPerRPC.Record(wctx, gctx.messagesReceived, metric.WithAttributes(metricAttrs...))
c.rpcResponsesPerRPC.Record(wctx, gctx.messagesSent, metric.WithAttributes(metricAttrs...))
default:
return
}
}
func withoutCancel(parent context.Context) context.Context {
if parent == nil {
panic("cannot create context from nil parent")
}
return withoutCancelCtx{parent}
}
type withoutCancelCtx struct {
c context.Context
}
func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (withoutCancelCtx) Done() <-chan struct{} {
return nil
}
func (withoutCancelCtx) Err() error {
return nil
}
func (w withoutCancelCtx) Value(key any) any {
return w.c.Value(key)
}
func (w withoutCancelCtx) String() string {
return "withoutCancel"
}

View File

@@ -16,11 +16,13 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
// Version is the current release version of the gRPC instrumentation.
func Version() string {
return "0.41.0"
return "0.46.0"
// This string is updated by the pre_release.sh script during release
}
// SemVersion is the semantic version to be supplied to tracer/meter creation.
//
// Deprecated: Use [Version] instead.
func SemVersion() string {
return "semver:" + Version()
return Version()
}

5
vendor/go.opentelemetry.io/otel/.codespellignore generated vendored Normal file
View File

@@ -0,0 +1,5 @@
ot
fo
te
collison
consequentially

10
vendor/go.opentelemetry.io/otel/.codespellrc generated vendored Normal file
View File

@@ -0,0 +1,10 @@
# https://github.com/codespell-project/codespell
[codespell]
builtin = clear,rare,informal
check-filenames =
check-hidden =
ignore-words = .codespellignore
interactive = 1
skip = .git,go.mod,go.sum,semconv,venv,.tools
uri-ignore-words-list = *
write =

View File

@@ -2,6 +2,7 @@
Thumbs.db
.tools/
venv/
.idea/
.vscode/
*.iml
@@ -12,12 +13,14 @@ go.work.sum
gen/
/example/dice/dice
/example/fib/fib
/example/fib/traces.txt
/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/otel-collector/otel-collector
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/view/view
/example/zipkin/zipkin
/example/otel-collector/otel-collector

View File

@@ -12,8 +12,9 @@ linters:
- depguard
- errcheck
- godot
- gofmt
- gofumpt
- goimports
- gosec
- gosimple
- govet
- ineffassign
@@ -53,6 +54,20 @@ issues:
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters:
- revive
# It's okay to not run gosec in a test.
- path: _test\.go
linters:
- gosec
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples.
- text: "G404:"
linters:
- gosec
# Ignoring gosec G402: TLS MinVersion too low
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- text: "G402: TLS MinVersion too low."
linters:
- gosec
include:
# revive exported should have comment or be unexported.
- EXC0012
@@ -61,28 +76,63 @@ issues:
linters-settings:
depguard:
# Check the list against standard lib.
# Default: false
include-go-root: true
# A list of packages for the list type specified.
# Default: []
packages:
- "crypto/md5"
- "crypto/sha1"
- "crypto/**/pkix"
ignore-file-rules:
- "**/*_test.go"
additional-guards:
# Do not allow testing packages in non-test files.
- list-type: denylist
include-go-root: true
packages:
- testing
- github.com/stretchr/testify
ignore-file-rules:
- "**/*_test.go"
- "**/*test/*.go"
- "**/internal/matchers/*.go"
rules:
non-tests:
files:
- "!$test"
- "!**/*test/*.go"
- "!**/internal/matchers/*.go"
deny:
- pkg: "testing"
- pkg: "github.com/stretchr/testify"
- pkg: "crypto/md5"
- pkg: "crypto/sha1"
- pkg: "crypto/**/pkix"
otlp-internal:
files:
- "!**/exporters/otlp/internal/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
desc: Do not use cross-module internal packages.
otlptrace-internal:
files:
- "!**/exporters/otlp/otlptrace/*.go"
- "!**/exporters/otlp/otlptrace/internal/**.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
desc: Do not use cross-module internal packages.
otlpmetric-internal:
files:
- "!**/exporters/otlp/otlpmetric/internal/*.go"
- "!**/exporters/otlp/otlpmetric/internal/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
desc: Do not use cross-module internal packages.
otel-internal:
files:
- "**/sdk/*.go"
- "**/sdk/**/*.go"
- "**/exporters/*.go"
- "**/exporters/**/*.go"
- "**/schema/*.go"
- "**/schema/**/*.go"
- "**/metric/*.go"
- "**/metric/**/*.go"
- "**/bridge/*.go"
- "**/bridge/**/*.go"
- "**/example/*.go"
- "**/example/**/*.go"
- "**/trace/*.go"
- "**/trace/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/internal$"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/attribute"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/internaltest"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/matchers"
desc: Do not use cross-module internal packages.
godot:
exclude:
# Exclude links.
@@ -113,7 +163,7 @@ linters-settings:
- name: constant-logical-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
# TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
# TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
- name: context-as-argument
disabled: true
arguments:

View File

@@ -8,6 +8,288 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.20.0/0.43.0] 2023-11-10
This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
### Added
- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622)
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
### Deprecated
- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
- Deprecate `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
### Changed
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
This extends the `Tracer` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
This extends the `Span` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
### Fixed
- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as whitespace. (#4699)
- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
## [1.19.0/0.42.0/0.0.7] 2023-09-28
This release contains the first stable release of the OpenTelemetry Go [metric SDK].
Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Added
- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
### Changed
- Allow '/' characters in metric instrument names. (#4501)
- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
- Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
### Fixed
- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
### Removed
- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
This is a release candidate for the v1.19.0/v0.42.0 release.
That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Changed
- Allow '/' characters in metric instrument names. (#4501)
### Fixed
- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
## [1.18.0/0.41.0/0.0.6] 2023-09-12
This release drops the compatibility guarantee of [Go 1.19].
### Added
- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473)
- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
### Changed
- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
### Deprecated
- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
### Removed
- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
## [1.17.0/0.40.0/0.0.5] 2023-08-28
### Added
- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
- Add support for exponential histogram aggregations.
A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
- Support Go 1.21. (#4463)
### Changed
- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
### Removed
- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
Use the added `WithProducer` option instead. (#4346)
- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
### Fixed
- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporter/prometheus`. (#4373)
- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
### Deprecated
- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
OpenTelemetry dropped support for Jaeger exporter in July 2023.
Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
## [1.16.0/0.39.0] 2023-05-18
This release contains the first stable release of the OpenTelemetry Go [metric API].
Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Added
- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
### Changed
- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049)
- `MeterProvider` returns noop meters once it has been shutdown. (#4154)
### Removed
- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
Use `go.opentelemetry.io/otel/metric` instead. (#4055)
### Fixed
- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
This is a release candidate for the v1.16.0/v0.39.0 release.
That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Added
- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
- Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
- Use `GetMeterProvider` for a global `metric.MeterProvider`.
- Use `SetMeterProvider` to set the global `metric.MeterProvider`.
### Changed
- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
This stages the metric API to be released as a stable module. (#4038)
### Removed
- The `go.opentelemetry.io/otel/metric/global` package is removed.
Use `go.opentelemetry.io/otel` instead. (#4039)
## [1.15.1/0.38.1] 2023-05-02
### Fixed
- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
## [1.15.0/0.38.0] 2023-04-27
### Added
@@ -133,6 +415,8 @@ This release drops the compatibility guarantee of [Go 1.18].
- Handle empty environment variables as if they were not set. (#3764)
- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
### Deprecated
@@ -2437,7 +2721,15 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.15.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.20.0...HEAD
[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
@@ -2505,3 +2797,7 @@ It contains api and sdk for trace and meter.
[Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace

View File

@@ -14,4 +14,4 @@
* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod
CODEOWNERS @MrAlias @MadVikingGod @pellared

View File

@@ -28,6 +28,11 @@ precommit` - the `precommit` target is the default).
The `precommit` target also fixes the formatting of the code and
checks the status of the go module files.
Additionally, there is a `codespell` target that checks for common
typos in the code. It is not run by default, but you can run it
manually with `make codespell`. It will set up a virtual environment
in `venv` and install `codespell` there.
If after running `make precommit` the output of `git status` contains
`nothing to commit, working tree clean` then it means that everything
is up-to-date and properly formatted.
@@ -150,10 +155,10 @@ Any [Maintainer] can merge the PR once the above criteria have been met.
## Design Choices
As with other OpenTelemetry clients, opentelemetry-go follows the
[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
It's especially valuable to read through the [library
guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md).
guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
### Focus on Capabilities, Not Structure Compliance
@@ -174,23 +179,23 @@ For a deeper discussion, see
## Documentation
Each non-example Go Module should have its own `README.md` containing:
Each (non-internal, non-test) package must be documented using
[Go Doc Comments](https://go.dev/doc/comment),
preferably in a `doc.go` file.
- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/).
- Brief description.
- Installation instructions (and requirements if applicable).
- Hyperlink to an example. Depending on the component the example can be:
- An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go).
- A sample Go application with its own `README.md`, like [here](example/zipkin).
- Additional documentation sections such as:
- Configuration,
- Contributing,
- References.
Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
instead of putting code snippets in Go doc comments.
In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
[Here](exporters/jaeger/README.md) is an example of a concise `README.md`.
You can install and run a "local Go Doc site" in the following way:
Moreover, it should be possible to navigate to any `README.md` from the
root `README.md`.
```sh
go install golang.org/x/pkgsite/cmd/pkgsite@latest
pkgsite
```
[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
is an example of a very well-documented package.
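A testable example is a good default shape for this. A minimal sketch (the `add` package and its `Add` function are hypothetical): `go test` compiles the example and checks the `// Output:` comment, and pkg.go.dev renders it next to the documented symbol.
```go
package add_test

import (
	"fmt"

	"example.com/add" // hypothetical package being documented
)

// ExampleAdd is compiled and run by `go test`; its stdout is verified against
// the Output comment below, so the example can never silently rot.
func ExampleAdd() {
	fmt.Println(add.Add(1, 2))
	// Output: 3
}
```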
## Style Guide
@@ -470,8 +475,33 @@ documentation are allowed to be extended with additional methods.
> Warning: methods may be added to this interface in minor releases.
These interfaces are defined by the OpenTelemetry specification and will be
updated as the specification evolves.
Otherwise, stable interfaces MUST NOT be modified.
#### How to Change Specification Interfaces
When an API change must be made, we will update the SDK with the new method one
release before the API change. This will allow the SDK one version before the
API change to work seamlessly with the new API.
If an incompatible version of the SDK is used with the new API, the application
will fail to compile.
#### How Not to Change Specification Interfaces
We have explored using a v2 of the API to change interfaces and found that there
was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
Problems arose when a library upgraded to v2 but the application did not,
and as a result no telemetry was produced.
More detail of the approaches considered and their limitations can be found in
the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
issue.
#### How to Change Other Interfaces
If new functionality is needed for an interface that cannot be changed it MUST
be added by including an additional interface. That added interface can be a
simple interface for the specific functionality that you want to add or it can
@@ -526,6 +556,37 @@ functionality should be added, each one will need their own super-set
interfaces and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
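As a sketch of that targeted-extension pattern (the `Exporter` and `FlushingExporter` names are illustrative, not taken from this codebase), the new capability lives in its own small interface and callers discover it with a type assertion:
```go
package extension

import "fmt"

// Exporter is an existing, stable interface that must not change.
type Exporter interface {
	Export(data string) error
}

// FlushingExporter is the small, targeted extension interface that adds the
// new capability without modifying Exporter.
type FlushingExporter interface {
	Flush() error
}

// Shutdown flushes e only if its concrete type opted into the extension.
func Shutdown(e Exporter) error {
	if f, ok := e.(FlushingExporter); ok {
		if err := f.Flush(); err != nil {
			return fmt.Errorf("flush before shutdown: %w", err)
		}
	}
	return nil
}
```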
### Testing
The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the
absence of race conditions.
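A sketch of such a test, assuming a hypothetical `counter` type; every goroutine is joined before the test returns, so none can leak, and running it with `go test -race` checks the concurrency claim:
```go
package counter_test

import (
	"sync"
	"testing"
)

type counter struct {
	mu sync.Mutex
	n  int
}

func (c *counter) Inc() { c.mu.Lock(); c.n++; c.mu.Unlock() }

// TestCounterIncConcurrentSafe exercises Inc from many goroutines; the
// WaitGroup ensures no goroutine outlives the test.
func TestCounterIncConcurrentSafe(t *testing.T) {
	c := new(counter)
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Inc()
		}()
	}
	wg.Wait()
	if c.n != 100 {
		t.Errorf("got %d, want 100", c.n)
	}
}
```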
### Internal packages
The use of internal packages should be scoped to a single module. A sub-module
should never import from a parent internal package. This creates a coupling
between the two modules: a user can upgrade the parent without the child, and
if the internal package API has changed, the upgrade will fail[^3].
There are two known exceptions to this rule:
- `go.opentelemetry.io/otel/internal/global`
- This package manages global state for all of opentelemetry-go. It needs to
be a single package in order to ensure the uniqueness of the global state.
- `go.opentelemetry.io/otel/internal/baggage`
- This package provides values in a `context.Context` that need to be
recognized by `go.opentelemetry.io/otel/baggage` and
`go.opentelemetry.io/otel/bridge/opentracing` but remain private.
If you have duplicate code in multiple modules, make that code into a Go
template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
to render the templates in the desired locations. See [#4404] for an example of
this.
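For reference, the rendering is driven by `go:generate` directives of the form added in `internal/gen.go` later in this change, for example:
```go
//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
```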
[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
## Approvers and Maintainers
### Approvers
@@ -533,14 +594,14 @@ interface that defines the specific functionality should be preferred.
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google
- [Robert Pająk](https://github.com/pellared), Splunk
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Maintainers
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Robert Pająk](https://github.com/pellared), Splunk
- [Tyler Yahn](https://github.com/MrAlias), Splunk
### Emeritus
@@ -555,3 +616,5 @@ repo](https://github.com/open-telemetry/community/blob/main/community-membership
[Approver]: #approvers
[Maintainer]: #maintainers
[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404

View File

@@ -25,8 +25,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
# Tools
@@ -71,21 +71,78 @@ $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
GOJQ = $(TOOLS)/gojq
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
GOTMPL = $(TOOLS)/gotmpl
$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
GORELEASE = $(TOOLS)/gorelease
$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT)
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
# The directory where the virtual environment is created.
VENVDIR := venv
# The directory where the python tools are installed.
PYTOOLS := $(VENVDIR)/bin
# The pip executable in the virtual environment.
PIP := $(PYTOOLS)/pip
# The directory in the docker image where the current directory is mounted.
WORKDIR := /workdir
# The python image to use for the virtual environment.
PYTHONIMAGE := python:3.11.3-slim-bullseye
# Run the python image with the current directory mounted.
DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
# Create a virtual environment for Python tools.
$(PYTOOLS):
# The `--upgrade` flag is needed to ensure that the virtual environment is
# created with the latest pip version.
@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
# Install python packages into the virtual environment.
$(PYTOOLS)/%: | $(PYTOOLS)
@$(DOCKERPY) $(PIP) install -r requirements.txt
CODESPELL = $(PYTOOLS)/codespell
$(CODESPELL): PACKAGE=codespell
# Generate
.PHONY: generate
generate: go-generate vanity-import-fix
.PHONY: go-generate
go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
go-generate/%: DIR=$*
go-generate/%: | $(STRINGER) $(GOTMPL)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
.PHONY: vanity-import-fix
vanity-import-fix: | $(PORTO)
@$(PORTO) --include-internal -w .
# Generate go.work file for local development.
.PHONY: go-work
go-work: | $(CROSSLINK)
$(CROSSLINK) work --root=$(shell pwd)
# Build
.PHONY: generate build
.PHONY: build
generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
generate/%: DIR=$*
generate/%: | $(STRINGER) $(PORTO)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
build/%: DIR=$*
build/%:
@echo "$(GO) build $(DIR)/..." \
@@ -135,6 +192,18 @@ test-coverage: | $(GOCOVMERGE)
done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
# Adding a directory will include all benchmarks in that directory if a filter is not specified.
BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark
benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
BENCHMARK_FILTER = .
# You can override the filter for a particular directory by adding a rule here.
benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
benchmark/%:
@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
&& cd $* \
$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
@@ -156,30 +225,38 @@ go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
&& $(GO) mod tidy -compat=1.19
&& $(GO) mod tidy -compat=1.20
.PHONY: lint-modules
lint-modules: go-mod-tidy
.PHONY: lint
lint: misspell lint-modules golangci-lint
lint: misspell lint-modules golangci-lint govulncheck
.PHONY: vanity-import-check
vanity-import-check: | $(PORTO)
@$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)"
.PHONY: vanity-import-fix
vanity-import-fix: | $(PORTO)
@$(PORTO) --include-internal -w .
@$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
.PHONY: misspell
misspell: | $(MISSPELL)
@$(MISSPELL) -w $(ALL_DOCS)
.PHONY: govulncheck
govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
govulncheck/%: DIR=$*
govulncheck/%: | $(GOVULNCHECK)
@echo "govulncheck ./... in $(DIR)" \
&& cd $(DIR) \
&& $(GOVULNCHECK) ./...
.PHONY: codespell
codespell: | $(CODESPELL)
@$(DOCKERPY) $(CODESPELL)
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
@@ -189,7 +266,7 @@ license-check:
DEPENDABOT_CONFIG = .github/dependabot.yml
.PHONY: dependabot-check
dependabot-check: | $(DBOTCONF)
@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)"
@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
.PHONY: dependabot-generate
dependabot-generate: | $(DBOTCONF)
@@ -208,13 +285,23 @@ check-clean-work-tree:
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: gorelease
gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
gorelease/%: DIR=$*
gorelease/%:| $(GORELEASE)
@echo "gorelease in $(DIR):" \
&& cd $(DIR) \
&& $(GORELEASE) \
|| echo ""
.PHONY: prerelease
prerelease: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
@@ -225,3 +312,7 @@ COMMIT ?= "HEAD"
add-tags: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
.PHONY: lint-markdown
lint-markdown:
docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md

View File

@@ -11,22 +11,22 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status
| Signal | Status | Project |
| ------- | ---------- | ------- |
| Traces | Stable | N/A |
| Metrics | Beta | N/A |
| Logs | Frozen [1] | N/A |
| Signal | Status |
|---------|------------|
| Traces | Stable |
| Metrics | Stable |
| Logs | Design [1] |
- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository is tracked in our local
Progress and status specific to this repository is tracked in our
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
and
[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
Project versioning information and stability guarantees can be found in the
[versioning documentation](./VERSIONING.md).
[versioning documentation](VERSIONING.md).
### Compatibility
@@ -49,17 +49,17 @@ stop ensuring compatibility with these versions in the following manner:
Currently, this project supports the following environments.
| OS | Go Version | Architecture |
| ------- | ---------- | ------------ |
|---------|------------|--------------|
| Ubuntu | 1.21 | amd64 |
| Ubuntu | 1.20 | amd64 |
| Ubuntu | 1.19 | amd64 |
| Ubuntu | 1.21 | 386 |
| Ubuntu | 1.20 | 386 |
| Ubuntu | 1.19 | 386 |
| MacOS | 1.21 | amd64 |
| MacOS | 1.20 | amd64 |
| MacOS | 1.19 | amd64 |
| Windows | 1.21 | amd64 |
| Windows | 1.20 | amd64 |
| Windows | 1.19 | amd64 |
| Windows | 1.21 | 386 |
| Windows | 1.20 | 386 |
| Windows | 1.19 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
@@ -97,8 +97,7 @@ export pipeline to send that telemetry to an observability platform.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
| Exporter | Metrics | Traces |
| :-----------------------------------: | :-----: | :----: |
| [Jaeger](./exporters/jaeger/) | | ✓ |
|---------------------------------------|:-------:|:------:|
| [OTLP](./exporters/otlp/) | ✓ | ✓ |
| [Prometheus](./exporters/prometheus/) | ✓ | |
| [stdout](./exporters/stdout/) | ✓ | ✓ |

View File

@@ -2,28 +2,30 @@
## Semantic Convention Generation
New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated.
New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag.
1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
3. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
export TAG="v1.13.0" # Change to the release version you are generating.
export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG"
export TAG="v1.21.0" # Change to the release version you are generating.
export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
docker pull otel/semconvgen:latest
make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
```
This should create a new sub-package of [`semconv`](./semconv).
Ensure things look correct before submitting a pull request to include the addition.
**Note**, the generation code was changed to generate versions >= 1.13.
To generate versions prior to this, check out the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)).
## Breaking changes validation
You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
## Pre-Release
@@ -121,7 +123,17 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
### Website Documentation
Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification
[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
### Demo Repository
Bump the dependencies in the following Go services:
- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)

60
vendor/go.opentelemetry.io/otel/attribute/filter.go generated vendored Normal file
View File

@@ -0,0 +1,60 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute"
// Filter supports removing certain attributes from attribute sets. When
// the filter returns true, the attribute will be kept in the filtered
// attribute set. When the filter returns false, the attribute is excluded
// from the filtered attribute set, and the attribute instead appears in
// the removed list of excluded attributes.
type Filter func(KeyValue) bool
// NewAllowKeysFilter returns a Filter that only allows attributes with one of
// the provided keys.
//
// If keys is empty a deny-all filter is returned.
func NewAllowKeysFilter(keys ...Key) Filter {
if len(keys) <= 0 {
return func(kv KeyValue) bool { return false }
}
allowed := make(map[Key]struct{})
for _, k := range keys {
allowed[k] = struct{}{}
}
return func(kv KeyValue) bool {
_, ok := allowed[kv.Key]
return ok
}
}
// NewDenyKeysFilter returns a Filter that only allows attributes
// that do not have one of the provided keys.
//
// If keys is empty an allow-all filter is returned.
func NewDenyKeysFilter(keys ...Key) Filter {
if len(keys) <= 0 {
return func(kv KeyValue) bool { return true }
}
forbid := make(map[Key]struct{})
for _, k := range keys {
forbid[k] = struct{}{}
}
return func(kv KeyValue) bool {
_, ok := forbid[kv.Key]
return !ok
}
}
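
A usage sketch for these filters (not part of the vendored file); `attribute.Set.Filter` returns the kept set plus the attributes the filter dropped:
```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("http.method", "GET"),
		attribute.String("http.target", "/secret"),
		attribute.Int("http.status_code", 200),
	)

	// Keep only the allow-listed keys; dropped holds everything else.
	allow := attribute.NewAllowKeysFilter("http.method", "http.status_code")
	kept, dropped := set.Filter(allow)
	fmt.Println(kept.Len(), len(dropped)) // 2 1
}
```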

View File

@@ -39,13 +39,6 @@ type (
iface interface{}
}
// Filter supports removing certain attributes from attribute sets. When
// the filter returns true, the attribute will be kept in the filtered
// attribute set. When the filter returns false, the attribute is excluded
// from the filtered attribute set, and the attribute instead appears in
// the removed list of excluded attributes.
Filter func(KeyValue) bool
// Sortable implements sort.Interface, used for sorting KeyValue. This is
// an exported type to support a memory optimization. A pointer to one of
// these is needed for the call to sort.Stable(), which the caller may

View File

@@ -61,11 +61,6 @@ type Property struct {
// hasValue indicates if a zero-value value means the property does not
// have a value or if it was the zero-value.
hasValue bool
// hasData indicates whether the created property contains data or not.
// Properties that do not contain data are invalid with no other check
// required.
hasData bool
}
// NewKeyProperty returns a new Property for key.
@@ -76,7 +71,7 @@ func NewKeyProperty(key string) (Property, error) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
p := Property{key: key, hasData: true}
p := Property{key: key}
return p, nil
}
@@ -95,7 +90,6 @@ func NewKeyValueProperty(key, value string) (Property, error) {
key: key,
value: value,
hasValue: true,
hasData: true,
}
return p, nil
}
@@ -117,7 +111,7 @@ func parseProperty(property string) (Property, error) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
p := Property{hasData: true}
var p Property
if match[1] != "" {
p.key = match[1]
} else {
@@ -136,10 +130,6 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
if !p.hasData {
return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
}
if !keyRe.MatchString(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
@@ -264,7 +254,7 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
decodedValue, err := url.QueryUnescape(value)
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
@@ -289,31 +279,29 @@ func parseMember(member string) (Member, error) {
props properties
)
parts := strings.SplitN(member, propertyDelimiter, 2)
switch len(parts) {
case 2:
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
for _, pStr := range strings.Split(parts[1], propertyDelimiter) {
for _, pStr := range strings.Split(properties, propertyDelimiter) {
p, err := parseProperty(pStr)
if err != nil {
return newInvalidMember(), err
}
props = append(props, p)
}
fallthrough
case 1:
}
// Parse the member key/value pair.
// Take into account a value can contain equal signs (=).
kv := strings.SplitN(parts[0], keyValueDelimiter, 2)
if len(kv) != 2 {
k, v, found := strings.Cut(keyValue, keyValueDelimiter)
if !found {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
key = strings.TrimSpace(kv[0])
key = strings.TrimSpace(k)
var err error
value, err = url.QueryUnescape(strings.TrimSpace(kv[1]))
value, err = url.PathUnescape(strings.TrimSpace(v))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
@@ -323,12 +311,6 @@ func parseMember(member string) (Member, error) {
if !valueRe.MatchString(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
default:
// This should never happen unless a developer has changed the string
// splitting somehow. Panic instead of failing silently and allowing
// the bug to slip past the CI checks.
panic("failed to parse baggage member")
}
return Member{key: key, value: value, properties: props, hasData: true}, nil
}

View File

@@ -16,6 +16,6 @@
Package codes defines the canonical error codes used by OpenTelemetry.
It conforms to [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode).
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status).
*/
package codes // import "go.opentelemetry.io/otel/codes"

29
vendor/go.opentelemetry.io/otel/internal/gen.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opentelemetry.io/otel/internal"
//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go

View File

@@ -18,7 +18,6 @@ import (
"log"
"os"
"sync/atomic"
"unsafe"
)
var (
@@ -42,7 +41,7 @@ type ErrorHandler interface {
}
type ErrDelegator struct {
delegate unsafe.Pointer
delegate atomic.Pointer[ErrorHandler]
}
func (d *ErrDelegator) Handle(err error) {
@@ -50,12 +49,12 @@ func (d *ErrDelegator) Handle(err error) {
}
func (d *ErrDelegator) getDelegate() ErrorHandler {
return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate))
return *d.delegate.Load()
}
// setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh))
d.delegate.Store(&eh)
}
func defaultErrorHandler() *ErrDelegator {
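
For context, a minimal sketch of the `atomic.Pointer[T]` pattern (Go 1.19+) that replaces the `unsafe.Pointer` casts above; the `handler` type is illustrative:
```go
package main

import (
	"fmt"
	"sync/atomic"
)

type handler struct{ name string }

func main() {
	var delegate atomic.Pointer[handler]
	delegate.Store(&handler{name: "default"})
	delegate.Store(&handler{name: "custom"}) // atomically replace the delegate

	// Load and Store are safe to call concurrently, and the generic pointer
	// removes the manual unsafe.Pointer conversions.
	fmt.Println(delegate.Load().name) // custom
}
```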

View File

@@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/metric/internal/global"
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"context"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
)
@@ -35,16 +34,18 @@ type afCounter struct {
name string
opts []metric.Float64ObservableCounterOption
delegate atomic.Value //metric.Float64ObservableCounter
delegate atomic.Value // metric.Float64ObservableCounter
}
var _ unwrapper = (*afCounter)(nil)
var _ metric.Float64ObservableCounter = (*afCounter)(nil)
var (
_ unwrapper = (*afCounter)(nil)
_ metric.Float64ObservableCounter = (*afCounter)(nil)
)
func (i *afCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -64,16 +65,18 @@ type afUpDownCounter struct {
name string
opts []metric.Float64ObservableUpDownCounterOption
delegate atomic.Value //metric.Float64ObservableUpDownCounter
delegate atomic.Value // metric.Float64ObservableUpDownCounter
}
var _ unwrapper = (*afUpDownCounter)(nil)
var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
var (
_ unwrapper = (*afUpDownCounter)(nil)
_ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
)
func (i *afUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -93,16 +96,18 @@ type afGauge struct {
name string
opts []metric.Float64ObservableGaugeOption
delegate atomic.Value //metric.Float64ObservableGauge
delegate atomic.Value // metric.Float64ObservableGauge
}
var _ unwrapper = (*afGauge)(nil)
var _ metric.Float64ObservableGauge = (*afGauge)(nil)
var (
_ unwrapper = (*afGauge)(nil)
_ metric.Float64ObservableGauge = (*afGauge)(nil)
)
func (i *afGauge) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -122,16 +127,18 @@ type aiCounter struct {
name string
opts []metric.Int64ObservableCounterOption
delegate atomic.Value //metric.Int64ObservableCounter
delegate atomic.Value // metric.Int64ObservableCounter
}
var _ unwrapper = (*aiCounter)(nil)
var _ metric.Int64ObservableCounter = (*aiCounter)(nil)
var (
_ unwrapper = (*aiCounter)(nil)
_ metric.Int64ObservableCounter = (*aiCounter)(nil)
)
func (i *aiCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -151,16 +158,18 @@ type aiUpDownCounter struct {
name string
opts []metric.Int64ObservableUpDownCounterOption
delegate atomic.Value //metric.Int64ObservableUpDownCounter
delegate atomic.Value // metric.Int64ObservableUpDownCounter
}
var _ unwrapper = (*aiUpDownCounter)(nil)
var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
var (
_ unwrapper = (*aiUpDownCounter)(nil)
_ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
)
func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -180,16 +189,18 @@ type aiGauge struct {
name string
opts []metric.Int64ObservableGaugeOption
delegate atomic.Value //metric.Int64ObservableGauge
delegate atomic.Value // metric.Int64ObservableGauge
}
var _ unwrapper = (*aiGauge)(nil)
var _ metric.Int64ObservableGauge = (*aiGauge)(nil)
var (
_ unwrapper = (*aiGauge)(nil)
_ metric.Int64ObservableGauge = (*aiGauge)(nil)
)
func (i *aiGauge) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -209,7 +220,7 @@ type sfCounter struct {
name string
opts []metric.Float64CounterOption
delegate atomic.Value //metric.Float64Counter
delegate atomic.Value // metric.Float64Counter
}
var _ metric.Float64Counter = (*sfCounter)(nil)
@@ -217,7 +228,7 @@ var _ metric.Float64Counter = (*sfCounter)(nil)
func (i *sfCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -235,7 +246,7 @@ type sfUpDownCounter struct {
name string
opts []metric.Float64UpDownCounterOption
delegate atomic.Value //metric.Float64UpDownCounter
delegate atomic.Value // metric.Float64UpDownCounter
}
var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
@@ -243,7 +254,7 @@ var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -261,7 +272,7 @@ type sfHistogram struct {
name string
opts []metric.Float64HistogramOption
delegate atomic.Value //metric.Float64Histogram
delegate atomic.Value // metric.Float64Histogram
}
var _ metric.Float64Histogram = (*sfHistogram)(nil)
@@ -269,7 +280,7 @@ var _ metric.Float64Histogram = (*sfHistogram)(nil)
func (i *sfHistogram) setDelegate(m metric.Meter) {
ctr, err := m.Float64Histogram(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -287,7 +298,7 @@ type siCounter struct {
name string
opts []metric.Int64CounterOption
delegate atomic.Value //metric.Int64Counter
delegate atomic.Value // metric.Int64Counter
}
var _ metric.Int64Counter = (*siCounter)(nil)
@@ -295,7 +306,7 @@ var _ metric.Int64Counter = (*siCounter)(nil)
func (i *siCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -313,7 +324,7 @@ type siUpDownCounter struct {
name string
opts []metric.Int64UpDownCounterOption
delegate atomic.Value //metric.Int64UpDownCounter
delegate atomic.Value // metric.Int64UpDownCounter
}
var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
@@ -321,7 +332,7 @@ var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
func (i *siUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
@@ -339,7 +350,7 @@ type siHistogram struct {
name string
opts []metric.Int64HistogramOption
delegate atomic.Value //metric.Int64Histogram
delegate atomic.Value // metric.Int64Histogram
}
var _ metric.Int64Histogram = (*siHistogram)(nil)
@@ -347,7 +358,7 @@ var _ metric.Int64Histogram = (*siHistogram)(nil)
func (i *siHistogram) setDelegate(m metric.Meter) {
ctr, err := m.Int64Histogram(i.name, i.opts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)

View File

@@ -18,7 +18,6 @@ import (
"log"
"os"
"sync/atomic"
"unsafe"
"github.com/go-logr/logr"
"github.com/go-logr/stdr"
@@ -28,7 +27,7 @@ import (
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
var globalLogger unsafe.Pointer
var globalLogger atomic.Pointer[logr.Logger]
func init() {
SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
@@ -40,11 +39,11 @@ func init() {
// To see Info messages use a logger with `l.V(4).Enabled() == true`
// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
func SetLogger(l logr.Logger) {
atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
globalLogger.Store(&l)
}
func getLogger() logr.Logger {
return *(*logr.Logger)(atomic.LoadPointer(&globalLogger))
return *globalLogger.Load()
}
// Info prints messages about the general state of the API or SDK.

View File

@@ -12,14 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/metric/internal/global"
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"container/list"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
)
@@ -37,11 +36,6 @@ type meterProvider struct {
delegate metric.MeterProvider
}
type il struct {
name string
version string
}
// setDelegate configures p to delegate all MeterProvider functionality to
// provider.
//
@@ -340,7 +334,7 @@ func (c *registration) setDelegate(m metric.Meter) {
reg, err := m.RegisterCallback(c.function, insts...)
if err != nil {
otel.Handle(err)
GetErrorHandler().Handle(err)
}
c.unreg = reg.Unregister

View File

@@ -19,6 +19,7 @@ import (
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
@@ -31,14 +32,20 @@ type (
propagatorsHolder struct {
tm propagation.TextMapPropagator
}
meterProviderHolder struct {
mp metric.MeterProvider
}
)
var (
globalTracer = defaultTracerValue()
globalPropagators = defaultPropagatorsValue()
globalMeterProvider = defaultMeterProvider()
delegateTraceOnce sync.Once
delegateTextMapPropagatorOnce sync.Once
delegateMeterOnce sync.Once
)
// TracerProvider is the internal implementation for global.TracerProvider.
@@ -102,6 +109,34 @@ func SetTextMapPropagator(p propagation.TextMapPropagator) {
globalPropagators.Store(propagatorsHolder{tm: p})
}
// MeterProvider is the internal implementation for global.MeterProvider.
func MeterProvider() metric.MeterProvider {
return globalMeterProvider.Load().(meterProviderHolder).mp
}
// SetMeterProvider is the internal implementation for global.SetMeterProvider.
func SetMeterProvider(mp metric.MeterProvider) {
current := MeterProvider()
if _, cOk := current.(*meterProvider); cOk {
if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
// Do not assign the default delegating MeterProvider to delegate
// to itself.
Error(
errors.New("no delegate configured in meter provider"),
"Setting meter provider to it's current value. No delegate will be configured",
)
return
}
}
delegateMeterOnce.Do(func() {
if def, ok := current.(*meterProvider); ok {
def.setDelegate(mp)
}
})
globalMeterProvider.Store(meterProviderHolder{mp: mp})
}
func defaultTracerValue() *atomic.Value {
v := &atomic.Value{}
v.Store(tracerProviderHolder{tp: &tracerProvider{}})
@@ -113,3 +148,9 @@ func defaultPropagatorsValue() *atomic.Value {
v.Store(propagatorsHolder{tm: newTextMapPropagator()})
return v
}
func defaultMeterProvider() *atomic.Value {
v := &atomic.Value{}
v.Store(meterProviderHolder{mp: &meterProvider{}})
return v
}

View File

@@ -39,6 +39,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
// tracerProvider is a placeholder for a configured SDK TracerProvider.
@@ -46,6 +47,8 @@ import (
// All TracerProvider functionality is forwarded to a delegate once
// configured.
type tracerProvider struct {
embedded.TracerProvider
mtx sync.Mutex
tracers map[il]*tracer
delegate trace.TracerProvider
@@ -119,6 +122,8 @@ type il struct {
// All Tracer functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopTracer.
type tracer struct {
embedded.Tracer
name string
opts []trace.TracerOption
provider *tracerProvider
@@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
// SpanContext. It performs no operations other than to return the wrapped
// SpanContext.
type nonRecordingSpan struct {
embedded.Span
sc trace.SpanContext
tracer *tracer
}
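
A sketch of why the `embedded` types are mixed in: a third-party implementation embeds them so it keeps compiling if methods are added to the API interfaces in a minor release. The forwarding wrapper below is hypothetical and simply delegates to the no-op provider:
```go
package main

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/noop"
)

// forwardingProvider stays compatible with future minor releases because the
// embedded type supplies any newly added interface methods.
type forwardingProvider struct {
	embedded.TracerProvider
	wrapped trace.TracerProvider
}

func (p forwardingProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	// Real code might decorate or log here; this sketch only forwards.
	return p.wrapped.Tracer(name, opts...)
}

func main() {
	var _ trace.TracerProvider = forwardingProvider{wrapped: noop.NewTracerProvider()}
}
```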

View File

@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/metric/global"
package otel // import "go.opentelemetry.io/otel"
import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/internal/global"
)
// Meter returns a Meter from the global MeterProvider. The name must be the
@@ -32,14 +32,18 @@ import (
// the new MeterProvider.
//
// This is short for GetMeterProvider().Meter(name).
func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
return MeterProvider().Meter(instrumentationName, opts...)
func Meter(name string, opts ...metric.MeterOption) metric.Meter {
return GetMeterProvider().Meter(name, opts...)
}
// MeterProvider returns the registered global meter provider.
// GetMeterProvider returns the registered global meter provider.
//
// If no global MeterProvider has been registered, a No-op MeterProvider implementation is returned. When a global MeterProvider is registered for the first time, the returned MeterProvider, and all the Meters it has created or will create, are recreated automatically from the new MeterProvider.
func MeterProvider() metric.MeterProvider {
// If no global MeterProvider has been registered, a No-op MeterProvider
// implementation is returned. When a global MeterProvider is registered for
// the first time, the returned MeterProvider, and all the Meters it has
// created or will create, are recreated automatically from the new
// MeterProvider.
func GetMeterProvider() metric.MeterProvider {
return global.MeterProvider()
}
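
A usage sketch of the relocated global accessors; the instrumentation scope name is made up:
```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// otel.Meter is shorthand for otel.GetMeterProvider().Meter(...).
	meter := otel.Meter(
		"example.com/mylib", // hypothetical instrumentation scope name
		metric.WithInstrumentationVersion("v0.1.0"),
	)

	requests, err := meter.Int64Counter("requests.total")
	if err != nil {
		panic(err)
	}
	requests.Add(context.Background(), 1)
}
```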

View File

@@ -37,10 +37,13 @@ type Float64Observable interface {
// assumed to be the cumulative sum of the count.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// package documentation on API implementation for information on how to set
// default behavior for
// unimplemented methods.
type Float64ObservableCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64ObservableCounter
Float64Observable
@@ -80,8 +83,9 @@ func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
}
// Float64ObservableCounterOption applies options to a
// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and [Option]
// for other options that can be used as a Float64ObservableCounterOption.
// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
// [InstrumentOption] for other options that can be used as a
// Float64ObservableCounterOption.
type Float64ObservableCounterOption interface {
applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
}
@@ -92,10 +96,12 @@ type Float64ObservableCounterOption interface {
// to be the cumulative sum of the count.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64ObservableUpDownCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64ObservableUpDownCounter
Float64Observable
@@ -136,7 +142,7 @@ func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
// Float64ObservableUpDownCounterOption applies options to a
// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
// [Option] for other options that can be used as a
// [InstrumentOption] for other options that can be used as a
// Float64ObservableUpDownCounterOption.
type Float64ObservableUpDownCounterOption interface {
applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
@@ -147,10 +153,12 @@ type Float64ObservableUpDownCounterOption interface {
// are only made within a callback for this instrument.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64ObservableGauge interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64ObservableGauge
Float64Observable
@@ -191,7 +199,7 @@ func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
// Float64ObservableGaugeOption applies options to a
// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
// [Option] for other options that can be used as a
// [InstrumentOption] for other options that can be used as a
// Float64ObservableGaugeOption.
type Float64ObservableGaugeOption interface {
applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
@@ -200,14 +208,19 @@ type Float64ObservableGaugeOption interface {
// Float64Observer is a recorder of float64 measurements.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64Observer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64Observer
// Observe records the float64 value.
Observe(value float64, opts ...ObserveOption)
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Observe(value float64, options ...ObserveOption)
}
// Float64Callback is a function registered with a Meter that makes

View File

@@ -37,10 +37,12 @@ type Int64Observable interface {
// assumed to be the cumulative sum of the count.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64ObservableCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64ObservableCounter
Int64Observable
@@ -80,8 +82,9 @@ func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
}
// Int64ObservableCounterOption applies options to a
// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and [Option] for
// other options that can be used as an Int64ObservableCounterOption.
// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
// [InstrumentOption] for other options that can be used as an
// Int64ObservableCounterOption.
type Int64ObservableCounterOption interface {
applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
}
@@ -92,10 +95,12 @@ type Int64ObservableCounterOption interface {
// be the cumulative sum of the count.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64ObservableUpDownCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64ObservableUpDownCounter
Int64Observable
@@ -136,7 +141,7 @@ func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
// Int64ObservableUpDownCounterOption applies options to a
// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
// [Option] for other options that can be used as an
// [InstrumentOption] for other options that can be used as an
// Int64ObservableUpDownCounterOption.
type Int64ObservableUpDownCounterOption interface {
applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
@@ -147,10 +152,12 @@ type Int64ObservableUpDownCounterOption interface {
// only made within a callback for this instrument.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64ObservableGauge interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64ObservableGauge
Int64Observable
@@ -190,8 +197,9 @@ func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
}
// Int64ObservableGaugeOption applies options to a
// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and [Option] for
// other options that can be used as an Int64ObservableGaugeOption.
// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
// [InstrumentOption] for other options that can be used as an
// Int64ObservableGaugeOption.
type Int64ObservableGaugeOption interface {
applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
}
@@ -199,14 +207,19 @@ type Int64ObservableGaugeOption interface {
// Int64Observer is a recorder of int64 measurements.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64Observer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64Observer
// Observe records the int64 value.
Observe(value int64, opts ...ObserveOption)
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Observe(value int64, options ...ObserveOption)
}
// Int64Callback is a function registered with a Meter that makes observations

View File

@@ -35,14 +35,14 @@ all instruments fall into two overlapping logical categories: asynchronous or
synchronous, and int64 or float64.
All synchronous instruments ([Int64Counter], [Int64UpDownCounter],
[Int64Histogram], [Float64Counter], [Float64UpDownCounter], [Float64Histogram])
are used to measure the operation and performance of source code during the
source code execution. These instruments only make measurements when the source
code they instrument is run.
[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
[Float64Histogram]) are used to measure the operation and performance of source
code during the source code execution. These instruments only make measurements
when the source code they instrument is run.
All asynchronous instruments ([Int64ObservableCounter],
[Int64ObservableUpDownCounter], [Int64ObservableGauge],
[Float64ObservableCounter], [Float64ObservableUpDownCounter],
[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
[Float64ObservableGauge]) are used to measure metrics outside of the execution
of source code. They are said to make "observations" via a callback function
called once every measurement collection cycle.
@@ -53,21 +53,54 @@ categories to use.
Outside of these two broad categories, instruments are described by the
function they are designed to serve. All Counters ([Int64Counter],
[Float64Counter], [Int64ObservableCounter], [Float64ObservableCounter]) are
[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are
designed to measure values that never decrease in value, but instead only
incrementally increase in value. UpDownCounters ([Int64UpDownCounter],
[Float64UpDownCounter], [Int64ObservableUpDownCounter],
[Float64UpDownCounter], [Int64ObservableUpDownCounter], and
[Float64ObservableUpDownCounter]) on the other hand, are designed to measure
values that can increase and decrease. When more information
needs to be conveyed about all the synchronous measurements made during a
collection cycle, a Histogram ([Int64Histogram], [Float64Histogram]) should be
used. Finally, when just the most recent measurement needs to be conveyed about an
asynchronous measurement, a Gauge ([Int64ObservableGauge],
values that can increase and decrease. When more information needs to be
conveyed about all the synchronous measurements made during a collection cycle,
a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
when just the most recent measurement needs to be conveyed about an
asynchronous measurement, a Gauge ([Int64ObservableGauge] and
[Float64ObservableGauge]) should be used.
See the [OpenTelemetry documentation] for more information about instruments
and their intended use.
# Measurements
Measurements are made by recording values and information about the values with
an instrument. How these measurements are recorded depends on the instrument.
Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
[Float64Histogram]) are recorded using the instrument methods directly. All
counter instruments have an Add method that is used to measure an increment
value, and all histogram instruments have a Record method to measure a data
point.
Asynchronous instruments ([Int64ObservableCounter],
[Int64ObservableUpDownCounter], [Int64ObservableGauge],
[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
[Float64ObservableGauge]) record measurements within a callback function. The
callback is registered with the Meter which ensures the callback is called once
per collection cycle. A callback can be registered two ways: during the
instrument's creation using an option, or later using the RegisterCallback
method of the [Meter] that created the instrument.
If the following criteria are met, an option ([WithInt64Callback] or
[WithFloat64Callback]) can be used during the asynchronous instrument's
creation to register a callback ([Int64Callback] or [Float64Callback],
respectively):
- The measurement process is known when the instrument is created
- Only that instrument will make a measurement within the callback
- The callback never needs to be unregistered
If the criteria are not met, use the RegisterCallback method of the [Meter] that
created the instrument to register a [Callback].
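For example, a minimal sketch of both registration styles (the instrument names are illustrative assumptions): the first gauge registers its callback at creation with [WithInt64Callback]; the second is observed through RegisterCallback so the callback can be unregistered later.

package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/async-callbacks")

	// Option-based registration: the callback only observes this instrument
	// and never needs to be unregistered.
	_, _ = meter.Int64ObservableGauge("runtime.goroutines",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(int64(runtime.NumGoroutine()))
			return nil
		}))

	// Meter-based registration: the returned Registration allows the
	// callback to be removed later (error handling elided for brevity).
	heap, _ := meter.Int64ObservableGauge("runtime.heap.alloc")
	reg, _ := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms)
		o.ObserveInt64(heap, int64(ms.HeapAlloc))
		return nil
	}, heap)
	defer func() { _ = reg.Unregister() }()
}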
# API Implementations
This package does not conform to the standard Go versioning policy; all of its
@@ -116,7 +149,7 @@ of [go.opentelemetry.io/otel/metric].
Finally, an author can embed another implementation in theirs. The embedded
implementation will be used for methods not defined by the author. For example,
an author who want to default to silently dropping the call can use
an author who wants to default to silently dropping the call can use
[go.opentelemetry.io/otel/metric/noop]:
import "go.opentelemetry.io/otel/metric/noop"

View File

@@ -39,6 +39,12 @@ type InstrumentOption interface {
Float64ObservableGaugeOption
}
// HistogramOption applies options to histogram instruments.
type HistogramOption interface {
Int64HistogramOption
Float64HistogramOption
}
type descOpt string
func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
@@ -167,10 +173,29 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob
}
// WithUnit sets the instrument unit.
//
// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
func WithUnit(u string) InstrumentOption { return unitOpt(u) }
// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
//
// This option is considered "advisory", and may be ignored by API implementations.
func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
type bucketOpt []float64
func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
c.explicitBucketBoundaries = o
return c
}
func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
c.explicitBucketBoundaries = o
return c
}
// AddOption applies options to an addition measurement. See
// [MeasurementOption] for other options that can be used as a AddOption.
// [MeasurementOption] for other options that can be used as an AddOption.
type AddOption interface {
applyAdd(AddConfig) AddConfig
}
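As a usage sketch for the options introduced above (the histogram name, unit, and bucket boundaries are illustrative assumptions), WithUnit and the advisory WithExplicitBucketBoundaries are passed when the histogram is created:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/histogram-options")

	// WithUnit takes a UCUM code; WithExplicitBucketBoundaries is advisory,
	// so an implementation may ignore the suggested buckets.
	duration, _ := meter.Float64Histogram("request.duration",
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5),
	)
	duration.Record(context.Background(), 0.042)
}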

View File

@@ -1,68 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/metric/internal/global"
import (
"errors"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
)
var (
globalMeterProvider = defaultMeterProvider()
delegateMeterOnce sync.Once
)
type meterProviderHolder struct {
mp metric.MeterProvider
}
// MeterProvider is the internal implementation for global.MeterProvider.
func MeterProvider() metric.MeterProvider {
return globalMeterProvider.Load().(meterProviderHolder).mp
}
// SetMeterProvider is the internal implementation for global.SetMeterProvider.
func SetMeterProvider(mp metric.MeterProvider) {
current := MeterProvider()
if _, cOk := current.(*meterProvider); cOk {
if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
// Do not assign the default delegating MeterProvider to delegate
// to itself.
global.Error(
errors.New("no delegate configured in meter provider"),
"Setting meter provider to it's current value. No delegate will be configured",
)
return
}
}
delegateMeterOnce.Do(func() {
if def, ok := current.(*meterProvider); ok {
def.setDelegate(mp)
}
})
globalMeterProvider.Store(meterProviderHolder{mp: mp})
}
func defaultMeterProvider() *atomic.Value {
v := &atomic.Value{}
v.Store(meterProviderHolder{mp: &meterProvider{}})
return v
}

View File

@@ -27,6 +27,9 @@ import (
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type MeterProvider interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.MeterProvider
// Meter returns a new Meter with the provided name and configuration.
@@ -47,60 +50,100 @@ type MeterProvider interface {
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Meter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Meter
// Int64Counter returns a new instrument identified by name and configured
// with options. The instrument is used to synchronously record increasing
// int64 measurements during a computational operation.
// Int64Counter returns a new Int64Counter instrument identified by name
// and configured with options. The instrument is used to synchronously
// record increasing int64 measurements during a computational operation.
Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
// Int64UpDownCounter returns a new instrument identified by name and
// configured with options. The instrument is used to synchronously record
// int64 measurements during a computational operation.
// Int64UpDownCounter returns a new Int64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record int64 measurements during a computational
// operation.
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
// Int64Histogram returns a new instrument identified by name and
// configured with options. The instrument is used to synchronously record
// the distribution of int64 measurements during a computational operation.
// Int64Histogram returns a new Int64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of int64 measurements during a
// computational operation.
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
// Int64ObservableCounter returns a new instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// increasing int64 measurements once per a measurement collection cycle.
// Int64ObservableCounter returns a new Int64ObservableCounter identified
// by name and configured with options. The instrument is used to
// asynchronously record increasing int64 measurements once per a
// measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
// Int64ObservableUpDownCounter returns a new instrument identified by name
// and configured with options. The instrument is used to asynchronously
// record int64 measurements once per a measurement collection cycle.
// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record int64 measurements once per
// a measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
// Int64ObservableGauge returns a new instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// instantaneous int64 measurements once per a measurement collection
// cycle.
// Int64ObservableGauge returns a new Int64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous int64 measurements once per a
// measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
// Float64Counter returns a new instrument identified by name and
// configured with options. The instrument is used to synchronously record
// increasing float64 measurements during a computational operation.
Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
// Float64UpDownCounter returns a new instrument identified by name and
// configured with options. The instrument is used to synchronously record
// float64 measurements during a computational operation.
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
// Float64Histogram returns a new instrument identified by name and
// configured with options. The instrument is used to synchronously record
// the distribution of float64 measurements during a computational
// operation.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
// Float64ObservableCounter returns a new instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// increasing float64 measurements once per a measurement collection cycle.
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
// Float64ObservableUpDownCounter returns a new instrument identified by
// Float64Counter returns a new Float64Counter instrument identified by
// name and configured with options. The instrument is used to
// asynchronously record float64 measurements once per a measurement
// collection cycle.
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
// Float64ObservableGauge returns a new instrument identified by name and
// synchronously record increasing float64 measurements during a
// computational operation.
Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
// Float64UpDownCounter returns a new Float64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record float64 measurements during a computational
// operation.
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
// Float64Histogram returns a new Float64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of float64 measurements during a
// computational operation.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
// Float64ObservableCounter returns a new Float64ObservableCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record increasing float64
// measurements once per a measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
// Float64ObservableUpDownCounter returns a new
// Float64ObservableUpDownCounter instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// instantaneous float64 measurements once per a measurement collection
// cycle.
// float64 measurements once per a measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
// Float64ObservableGauge returns a new Float64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous float64 measurements once per a
// measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
// RegisterCallback registers f to be called during the collection of a
@@ -114,6 +157,8 @@ type Meter interface {
//
// If no instruments are passed, f should not be registered nor called
// during collection.
//
// The function f needs to be concurrent safe.
RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
}
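Because the callback passed to RegisterCallback must be concurrent safe, a minimal sketch (the names and shared state are illustrative assumptions) guards the observed values with a mutex while reporting two instruments from a single callback:

package main

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// appStats is illustrative shared state updated elsewhere in the program.
type appStats struct {
	mu       sync.Mutex
	jobsDone int64
	hitRatio float64
}

func main() {
	meter := otel.Meter("example/register-callback")
	var stats appStats

	jobs, _ := meter.Int64ObservableCounter("jobs.completed")
	ratio, _ := meter.Float64ObservableGauge("cache.hit_ratio")

	// The callback may run on the collection goroutine, so access to the
	// shared fields is protected by a mutex.
	_, _ = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		stats.mu.Lock()
		defer stats.mu.Unlock()
		o.ObserveInt64(jobs, stats.jobsDone)
		o.ObserveFloat64(ratio, stats.hitRatio)
		return nil
	}, jobs, ratio)
}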
@@ -137,6 +182,9 @@ type Callback func(context.Context, Observer) error
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Observer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Observer
// ObserveFloat64 records the float64 value for obsrv.
@@ -152,6 +200,9 @@ type Observer interface {
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Registration interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Registration
// Unregister removes the callback registration from a Meter.

View File

@@ -23,14 +23,19 @@ import (
// Float64Counter is an instrument that records increasing float64 values.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64Counter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64Counter
// Add records a change to the counter.
Add(ctx context.Context, incr float64, opts ...AddOption)
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption)
}
// Float64CounterConfig contains options for synchronous counter instruments that
@@ -61,7 +66,8 @@ func (c Float64CounterConfig) Unit() string {
}
// Float64CounterOption applies options to a [Float64CounterConfig]. See
// [Option] for other options that can be used as a Float64CounterOption.
// [InstrumentOption] for other options that can be used as a
// Float64CounterOption.
type Float64CounterOption interface {
applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
}
@@ -70,14 +76,19 @@ type Float64CounterOption interface {
// float64 values.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64UpDownCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64UpDownCounter
// Add records a change to the counter.
Add(ctx context.Context, incr float64, opts ...AddOption)
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption)
}
// Float64UpDownCounterConfig contains options for synchronous counter
@@ -108,8 +119,8 @@ func (c Float64UpDownCounterConfig) Unit() string {
}
// Float64UpDownCounterOption applies options to a
// [Float64UpDownCounterConfig]. See [Option] for other options that can be
// used as a Float64UpDownCounterOption.
// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
// can be used as a Float64UpDownCounterOption.
type Float64UpDownCounterOption interface {
applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
}
@@ -118,14 +129,19 @@ type Float64UpDownCounterOption interface {
// values.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64Histogram interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64Histogram
// Record adds an additional value to the distribution.
Record(ctx context.Context, incr float64, opts ...RecordOption)
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr float64, options ...RecordOption)
}
// Float64HistogramConfig contains options for synchronous counter instruments
@@ -133,6 +149,7 @@ type Float64Histogram interface {
type Float64HistogramConfig struct {
description string
unit string
explicitBucketBoundaries []float64
}
// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
@@ -155,8 +172,14 @@ func (c Float64HistogramConfig) Unit() string {
return c.unit
}
// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
return c.explicitBucketBoundaries
}
// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
// [Option] for other options that can be used as a Float64HistogramOption.
// [InstrumentOption] for other options that can be used as a
// Float64HistogramOption.
type Float64HistogramOption interface {
applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
}

View File

@@ -23,14 +23,19 @@ import (
// Int64Counter is an instrument that records increasing int64 values.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64Counter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64Counter
// Add records a change to the counter.
Add(ctx context.Context, incr int64, opts ...AddOption)
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption)
}
// Int64CounterConfig contains options for synchronous counter instruments that
@@ -60,8 +65,9 @@ func (c Int64CounterConfig) Unit() string {
return c.unit
}
// Int64CounterOption applies options to a [Int64CounterConfig]. See [Option]
// for other options that can be used as an Int64CounterOption.
// Int64CounterOption applies options to a [Int64CounterConfig]. See
// [InstrumentOption] for other options that can be used as an
// Int64CounterOption.
type Int64CounterOption interface {
applyInt64Counter(Int64CounterConfig) Int64CounterConfig
}
@@ -70,14 +76,19 @@ type Int64CounterOption interface {
// int64 values.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64UpDownCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64UpDownCounter
// Add records a change to the counter.
Add(ctx context.Context, incr int64, opts ...AddOption)
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption)
}
// Int64UpDownCounterConfig contains options for synchronous counter
@@ -108,7 +119,7 @@ func (c Int64UpDownCounterConfig) Unit() string {
}
// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig].
// See [Option] for other options that can be used as an
// See [InstrumentOption] for other options that can be used as an
// Int64UpDownCounterOption.
type Int64UpDownCounterOption interface {
applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
@@ -118,14 +129,19 @@ type Int64UpDownCounterOption interface {
// values.
//
// Warning: Methods may be added to this interface in minor releases. See
// [go.opentelemetry.io/otel/metric] package documentation on API
// implementation for information on how to set default behavior for
// unimplemented methods.
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64Histogram interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64Histogram
// Record adds an additional value to the distribution.
Record(ctx context.Context, incr int64, opts ...RecordOption)
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr int64, options ...RecordOption)
}
// Int64HistogramConfig contains options for synchronous counter instruments
@@ -133,6 +149,7 @@ type Int64Histogram interface {
type Int64HistogramConfig struct {
description string
unit string
explicitBucketBoundaries []float64
}
// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
@@ -155,8 +172,14 @@ func (c Int64HistogramConfig) Unit() string {
return c.unit
}
// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
return c.explicitBucketBoundaries
}
// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
// [Option] for other options that can be used as an Int64HistogramOption.
// [InstrumentOption] for other options that can be used as an
// Int64HistogramOption.
type Int64HistogramOption interface {
applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
}

View File

@@ -40,8 +40,10 @@ const (
// their proprietary information.
type TraceContext struct{}
var _ TextMapPropagator = TraceContext{}
var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
var (
_ TextMapPropagator = TraceContext{}
traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
)
// Inject sets the tracecontext from the Context into the carrier.
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {

1
vendor/go.opentelemetry.io/otel/requirements.txt generated vendored Normal file
View File

@@ -0,0 +1 @@
codespell==2.2.6

View File

@@ -268,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
c.stackTrace = bool(o)
return c
}
func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
c.stackTrace = bool(o)
return c

View File

@@ -62,5 +62,69 @@ a default.
defer span.End()
// ...
}
# API Implementations
This package does not conform to the standard Go versioning policy; all of its
interfaces may have methods added to them without a package major version bump.
This non-standard API evolution could surprise an uninformed implementation
author. They could unknowingly build their implementation in a way that would
result in a runtime panic for their users that update to the new API.
The API is designed to help inform an instrumentation author about this
non-standard API evolution. It requires them to choose a default behavior for
unimplemented interface methods. There are three behavior choices they can
make:
- Compilation failure
- Panic
- Default to another implementation
All interfaces in this API embed a corresponding interface from
[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
behavior of their implementations to be a compilation failure, signaling to
their users they need to update to the latest version of that implementation,
they need to embed the corresponding interface from
[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
example,
import "go.opentelemetry.io/otel/trace/embedded"
type TracerProvider struct {
embedded.TracerProvider
// ...
}
If an author wants the default behavior of their implementations to panic, they
can embed the API interface directly.
import "go.opentelemetry.io/otel/trace"
type TracerProvider struct {
trace.TracerProvider
// ...
}
This option is not recommended. It will lead to publishing packages that
contain runtime panics when users update to newer versions of
[go.opentelemetry.io/otel/trace], which may be done with a transitive
dependency.
Finally, an author can embed another implementation in theirs. The embedded
implementation will be used for methods not defined by the author. For example,
an author who wants to default to silently dropping the call can use
[go.opentelemetry.io/otel/trace/noop]:
import "go.opentelemetry.io/otel/trace/noop"
type TracerProvider struct {
noop.TracerProvider
// ...
}
It is strongly recommended that authors only embed
[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
That implementation is the only one OpenTelemetry authors can guarantee will
fully implement all the API interfaces when a user updates their API.
*/
package trace // import "go.opentelemetry.io/otel/trace"

View File

@@ -0,0 +1,56 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package embedded provides interfaces embedded within the [OpenTelemetry
// trace API].
//
// Implementers of the [OpenTelemetry trace API] can embed the relevant type
// from this package into their implementation directly. Doing so will result
// in a compilation error for users when the [OpenTelemetry trace API] is
// extended (which is something that can happen without a major version bump of
// the API package).
//
// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
package embedded // import "go.opentelemetry.io/otel/trace/embedded"
// TracerProvider is embedded in
// [go.opentelemetry.io/otel/trace.TracerProvider].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
// experience a compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type TracerProvider interface{ tracerProvider() }
// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
// compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
// is extended (which is something that can happen without a major version bump
// of the API package).
type Tracer interface{ tracer() }
// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
// compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Span interface{ span() }

View File

@@ -19,16 +19,20 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace/embedded"
)
// NewNoopTracerProvider returns an implementation of TracerProvider that
// performs no operations. The Tracer and Spans created from the returned
// TracerProvider also perform no operations.
//
// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
// instead.
func NewNoopTracerProvider() TracerProvider {
return noopTracerProvider{}
}
type noopTracerProvider struct{}
type noopTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = noopTracerProvider{}
@@ -38,7 +42,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
}
// noopTracer is an implementation of Tracer that performs no operations.
type noopTracer struct{}
type noopTracer struct{ embedded.Tracer }
var _ Tracer = noopTracer{}
@@ -54,7 +58,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
}
// noopSpan is an implementation of Span that performs no operations.
type noopSpan struct{}
type noopSpan struct{ embedded.Span }
var _ Span = noopSpan{}

View File

@@ -22,6 +22,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace/embedded"
)
const (
@@ -48,8 +49,10 @@ func (e errorConst) Error() string {
// nolint:revive // revive complains about stutter of `trace.TraceID`.
type TraceID [16]byte
var nilTraceID TraceID
var _ json.Marshaler = nilTraceID
var (
nilTraceID TraceID
_ json.Marshaler = nilTraceID
)
// IsValid checks whether the trace TraceID is valid. A valid trace ID does
// not consist of zeros only.
@@ -71,8 +74,10 @@ func (t TraceID) String() string {
// SpanID is a unique identity of a span in a trace.
type SpanID [8]byte
var nilSpanID SpanID
var _ json.Marshaler = nilSpanID
var (
nilSpanID SpanID
_ json.Marshaler = nilSpanID
)
// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
// of zeros only.
@@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
// create a Span and it is then up to the operation the Span represents to
// properly end the Span when the operation itself ends.
//
// Warning: methods may be added to this interface in minor releases.
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Span interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Span
// End completes the Span. The Span is considered complete and ready to be
// delivered through the rest of the telemetry pipeline after this method
// is called. Therefore, updates to the Span are not allowed after this
@@ -486,8 +498,15 @@ func (sk SpanKind) String() string {
// Tracer is the creator of Spans.
//
// Warning: methods may be added to this interface in minor releases.
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Tracer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Tracer
// Start creates a span and a context.Context containing the newly-created span.
//
// If the context.Context provided in `ctx` contains a Span then the newly-created
@@ -518,8 +537,15 @@ type Tracer interface {
// at runtime from its users or it can simply use the globally registered one
// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
//
// Warning: methods may be added to this interface in minor releases.
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type TracerProvider interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.TracerProvider
// Tracer returns a unique Tracer scoped to be used by instrumentation code
// to trace computational workflows. The scope and identity of that
// instrumentation code is uniquely defined by the name and options passed.

View File

@@ -28,9 +28,9 @@ const (
// based on the W3C Trace Context specification, see
// https://www.w3.org/TR/trace-context-1/#tracestate-header
noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*`
withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*`
valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
errInvalidKey errorConst = "invalid tracestate key"
errInvalidValue errorConst = "invalid tracestate value"
@@ -40,9 +40,10 @@ const (
)
var (
keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`)
valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`)
memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`)
withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`)
valueRe = regexp.MustCompile(`^` + valueFormat + `$`)
memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
)
type member struct {
@@ -51,10 +52,19 @@ type member struct {
}
func newMember(key, value string) (member, error) {
if !keyRe.MatchString(key) {
if len(key) > 256 {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
if !noTenantKeyRe.MatchString(key) {
if !withTenantKeyRe.MatchString(key) {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
}
atIndex := strings.LastIndex(key, "@")
if atIndex > 241 || len(key)-1-atIndex > 14 {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
}
}
if len(value) > 256 || !valueRe.MatchString(value) {
return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
}
return member{Key: key, Value: value}, nil
@@ -62,14 +72,14 @@ func newMember(key, value string) (member, error) {
func parseMember(m string) (member, error) {
matches := memberRe.FindStringSubmatch(m)
if len(matches) != 5 {
if len(matches) != 3 {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
return member{
Key: matches[1],
Value: matches[4],
}, nil
result, e := newMember(matches[1], matches[2])
if e != nil {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
return result, nil
}
// String encodes member into a string compliant with the W3C Trace Context
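The key and value rules enforced above surface through the public TraceState API; a minimal sketch (the keys and values are illustrative assumptions) parses a header containing a tenant-scoped key and inserts a new member subject to the same validation:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// A tenant-scoped key has the form key@vendor; plain keys are also valid.
	ts, err := trace.ParseTraceState("tenant@vendor=opaque-value,plainkey=other")
	if err != nil {
		panic(err)
	}

	// Insert applies the same key/value validation as parsing does.
	ts, err = ts.Insert("newkey", "new-value")
	if err != nil {
		panic(err)
	}

	fmt.Println(ts.Get("tenant@vendor")) // opaque-value
	fmt.Println(ts.String())
}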

View File

@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.15.0"
return "1.20.0"
}

View File

@@ -14,43 +14,41 @@
module-sets:
stable-v1:
version: v1.15.0
version: v1.20.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/example/dice
- go.opentelemetry.io/otel/example/fib
- go.opentelemetry.io/otel/example/jaeger
- go.opentelemetry.io/otel/example/namedtracer
- go.opentelemetry.io/otel/example/otel-collector
- go.opentelemetry.io/otel/example/passthrough
- go.opentelemetry.io/otel/example/zipkin
- go.opentelemetry.io/otel/exporters/jaeger
- go.opentelemetry.io/otel/exporters/otlp/internal/retry
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- go.opentelemetry.io/otel/exporters/zipkin
- go.opentelemetry.io/otel/metric
- go.opentelemetry.io/otel/sdk
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
version: v0.38.0
version: v0.43.0
modules:
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
- go.opentelemetry.io/otel/example/view
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/prometheus
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- go.opentelemetry.io/otel/metric
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/example/view
experimental-schema:
version: v0.0.4
version: v0.0.7
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.7
// +build go1.7
package context

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.9
// +build go1.9
package context

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.7
// +build !go1.7
package context

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.9
// +build !go1.9
package context

View File

@@ -20,41 +20,44 @@ import (
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
var dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return new([1 << 10]byte) }},
{New: func() interface{} { return new([2 << 10]byte) }},
{New: func() interface{} { return new([4 << 10]byte) }},
{New: func() interface{} { return new([8 << 10]byte) }},
{New: func() interface{} { return new([16 << 10]byte) }},
}
func getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(dataChunkSizeClasses)-1; i++ {
if size <= int64(dataChunkSizeClasses[i]) {
break
switch {
case size <= 1<<10:
return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
case size <= 2<<10:
return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
case size <= 4<<10:
return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
case size <= 8<<10:
return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
default:
return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
}
}
return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
for i, n := range dataChunkSizeClasses {
if len(p) == n {
dataChunkPools[i].Put(p)
return
}
}
switch len(p) {
case 1 << 10:
dataChunkPools[0].Put((*[1 << 10]byte)(p))
case 2 << 10:
dataChunkPools[1].Put((*[2 << 10]byte)(p))
case 4 << 10:
dataChunkPools[2].Put((*[4 << 10]byte)(p))
case 8 << 10:
dataChunkPools[3].Put((*[8 << 10]byte)(p))
case 16 << 10:
dataChunkPools[4].Put((*[16 << 10]byte)(p))
default:
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
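The rewrite above stores pointers to fixed-size arrays in the pools instead of plain slices: converting a []byte to the pool's interface{} parameter heap-allocates the slice header on every Put, while a pointer is stored directly. A minimal standalone sketch of the same pattern with a single 4 KiB size class (independent of the http2 code):

package main

import (
	"fmt"
	"sync"
)

// bufPool holds *[4096]byte values. A pointer fits in the pool's interface
// value directly, while a []byte would force its slice header onto the heap
// each time it is stored.
var bufPool = sync.Pool{
	New: func() interface{} { return new([4096]byte) },
}

func getBuf() []byte { return bufPool.Get().(*[4096]byte)[:] }

func putBuf(b []byte) {
	// The slice-to-array-pointer conversion (Go 1.17+) panics if len(b) is
	// less than 4096, so only full-size buffers can be returned to the pool.
	bufPool.Put((*[4096]byte)(b))
}

func main() {
	b := getBuf()
	copy(b, "hello")
	fmt.Println(len(b)) // 4096
	putBuf(b)
}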

View File

@@ -1,30 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.11
// +build go1.11
package http2
import (
"net/http/httptrace"
"net/textproto"
)
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
}
return nil
}

View File

@@ -1,27 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.15
// +build go1.15
package http2
import (
"context"
"crypto/tls"
)
// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
// connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
dialer := &tls.Dialer{
Config: cfg,
}
cn, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
return tlsCn, nil
}

View File

@@ -1,17 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.18
// +build go1.18
package http2
import (
"crypto/tls"
"net"
)
func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
return tc.NetConn()
}

View File

@@ -1,21 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.11
// +build !go1.11
package http2
import (
"net/http/httptrace"
"net/textproto"
)
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
return nil
}

View File

@@ -1,31 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.15
// +build !go1.15
package http2
import (
"context"
"crypto/tls"
)
// dialTLSWithContext opens a TLS connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
cn, err := tls.Dial(network, addr, cfg)
if err != nil {
return nil, err
}
if err := cn.Handshake(); err != nil {
return nil, err
}
if cfg.InsecureSkipVerify {
return cn, nil
}
if err := cn.VerifyHostname(cfg.ServerName); err != nil {
return nil, err
}
return cn, nil
}

View File

@@ -1,17 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.18
// +build !go1.18
package http2
import (
"crypto/tls"
"net"
)
func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
return nil
}

View File

@@ -2549,7 +2549,6 @@ type responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
@@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date,
})
if err != nil {
rws.dirty = true
return 0, err
}
if endStream {
@@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
rws.dirty = true
return 0, err
}
}
@@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
trailers: rws.trailers,
endStream: true,
})
if err != nil {
rws.dirty = true
}
return len(p), err
}
return len(p), nil
@@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) {
h.Del("Transfer-Encoding")
}
if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
httpResCode: code,
h: h,
endStream: rws.handlerDone && !rws.hasTrailers(),
}) != nil {
rws.dirty = true
}
})
return
}
@@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() {
rws := w.rws
dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
if !dirty {
// Only recycle the pool if all prior Write calls to
// the serverConn goroutine completed successfully. If
// they returned earlier due to resets from the peer
// there might still be write goroutines outstanding
// from the serverConn referencing the rws memory. See
// issue 20704.
responseWriterStatePool.Put(rws)
}
}
// Push errors.
@@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
}
sc.curHandlers++
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
return promisedID, nil
}

View File

@@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() {
if !ok {
return
}
if nc := tlsUnderlyingConn(tc); nc != nil {
if nc := tc.NetConn(); nc != nil {
nc.Close()
}
}
@@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
trace.GotFirstResponseByte()
}
}
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
}
return nil
}
// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
// connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
dialer := &tls.Dialer{
Config: cfg,
}
cn, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
return tlsCn, nil
}

View File

@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.18
// +build go1.18
package idna

View File

@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.10
// +build go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to

View File

@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.10
// +build !go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to

View File

@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.18
// +build !go1.18
package idna

View File

@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.10 && !go1.13
// +build go1.10,!go1.13
package idna

View File

@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.13 && !go1.14
// +build go1.13,!go1.14
package idna

View File

@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.14 && !go1.16
// +build go1.14,!go1.16
package idna

View File

@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.16 && !go1.21
// +build go1.16,!go1.21
package idna

View File

@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.21
// +build go1.21
package idna

View File

@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build !go1.10
// +build !go1.10
package idna

View File

@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.16
// +build !go1.16
package idna

View File

@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.16
// +build go1.16
package idna

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.5
// +build go1.5
package plan9

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.5
// +build !go1.5
package plan9

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build plan9 && race
// +build plan9,race
package plan9

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build plan9 && !race
// +build plan9,!race
package plan9

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build plan9
// +build plan9
package plan9

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build plan9
// +build plan9
// Package plan9 contains an interface to the low-level operating system
// primitives. OS details vary depending on the underlying system, and

View File

@@ -2,7 +2,6 @@
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build plan9 && 386
// +build plan9,386
package plan9

View File

@@ -2,7 +2,6 @@
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build plan9 && amd64
// +build plan9,amd64
package plan9

View File

@@ -2,7 +2,6 @@
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build plan9 && arm
// +build plan9,arm
package plan9

View File

@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
// +build go1.9
package unix

View File

@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc
// +build gc
#include "textflag.h"

View File

@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (freebsd || netbsd || openbsd) && gc
// +build freebsd netbsd openbsd
// +build gc
#include "textflag.h"

Some files were not shown because too many files have changed in this diff.