Merge pull request #117342 from kkkkun/update-etcd-deps-3.5.8

upgrade etcd deps to v3.5.8
Kubernetes Prow Robot
2023-05-06 06:19:17 -07:00
committed by GitHub
78 changed files with 1936 additions and 1037 deletions
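The changes below bump every go.etcd.io/etcd module from v3.5.7 to v3.5.8 (and the vendored bbolt from v1.3.6 to v1.3.7) in the main go.mod/go.sum and the other module files in the tree, then re-vendor. As a rough, hedged sketch, a bump like this is normally produced with the standard Go module commands (in kubernetes/kubernetes the hack/pin-dependency.sh and hack/update-vendor.sh scripts wrap these steps, so the exact invocation may differ):

    go get go.etcd.io/etcd/api/v3@v3.5.8 \
           go.etcd.io/etcd/client/pkg/v3@v3.5.8 \
           go.etcd.io/etcd/client/v2@v2.305.8 \
           go.etcd.io/etcd/client/v3@v3.5.8 \
           go.etcd.io/etcd/pkg/v3@v3.5.8 \
           go.etcd.io/etcd/raft/v3@v3.5.8 \
           go.etcd.io/etcd/server/v3@v3.5.8 \
           go.etcd.io/bbolt@v1.3.7
    go mod tidy
    go mod vendor   # regenerates the vendor/ changes shown further down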

go.mod (16 changed lines)

@@ -64,9 +64,9 @@ require (
github.com/stretchr/testify v1.8.2 github.com/stretchr/testify v1.8.2
github.com/vishvananda/netlink v1.1.0 github.com/vishvananda/netlink v1.1.0
github.com/vmware/govmomi v0.30.0 github.com/vmware/govmomi v0.30.0
go.etcd.io/etcd/api/v3 v3.5.7 go.etcd.io/etcd/api/v3 v3.5.8
go.etcd.io/etcd/client/pkg/v3 v3.5.7 go.etcd.io/etcd/client/pkg/v3 v3.5.8
go.etcd.io/etcd/client/v3 v3.5.7 go.etcd.io/etcd/client/v3 v3.5.8
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0
go.opentelemetry.io/otel v1.10.0 go.opentelemetry.io/otel v1.10.0
@@ -216,11 +216,11 @@ require (
github.com/vishvananda/netns v0.0.4 // indirect github.com/vishvananda/netns v0.0.4 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
github.com/xlab/treeprint v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect
go.etcd.io/bbolt v1.3.6 // indirect go.etcd.io/bbolt v1.3.7 // indirect
go.etcd.io/etcd/client/v2 v2.305.7 // indirect go.etcd.io/etcd/client/v2 v2.305.8 // indirect
go.etcd.io/etcd/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/pkg/v3 v3.5.8 // indirect
go.etcd.io/etcd/raft/v3 v3.5.7 // indirect go.etcd.io/etcd/raft/v3 v3.5.8 // indirect
go.etcd.io/etcd/server/v3 v3.5.7 // indirect go.etcd.io/etcd/server/v3 v3.5.8 // indirect
go.opencensus.io v0.23.0 // indirect go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect

go.sum (39 changed lines)

@@ -123,7 +123,6 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@@ -149,9 +148,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI=
github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
@@ -250,7 +247,6 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw=
github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
@@ -696,22 +692,22 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c=
go.etcd.io/etcd/client/v2 v2.305.7/go.mod h1:GQGT5Z3TBuAQGvgPfhR7VPySu/SudxmEkRq9BgzFU6s= go.etcd.io/etcd/client/v2 v2.305.8/go.mod h1:ZlAsxDK5/10I6xVHhFo9zinCMr/DDLKFetDDXlzKwqE=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU=
go.etcd.io/etcd/pkg/v3 v3.5.7/go.mod h1:kcOfWt3Ov9zgYdOiJ/o1Y9zFfLhQjylTgL4Lru8opRo= go.etcd.io/etcd/pkg/v3 v3.5.8/go.mod h1:C17MJkZHJIyJV+wWWx6Jz6YS6BfdkOnUkSwT9uuEO7s=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4=
go.etcd.io/etcd/raft/v3 v3.5.7/go.mod h1:TflkAb/8Uy6JFBxcRaH2Fr6Slm9mCPVdI2efzxY96yU= go.etcd.io/etcd/raft/v3 v3.5.8/go.mod h1:W6P5WxtOMfYNdLSEJX3vc8Pg6LOt+ewI9UCFKcnIexA=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= go.etcd.io/etcd/server/v3 v3.5.8 h1:eK9fU6Pd6IJD1k0u4zAq1NZsSsEOOimlP3kIkpcQrho=
go.etcd.io/etcd/server/v3 v3.5.7/go.mod h1:gxBgT84issUVBRpZ3XkW1T55NjOb4vZZRI4wVvNhf4A= go.etcd.io/etcd/server/v3 v3.5.8/go.mod h1:1y2ahPOrlE4pzVV5+rBCDur7QQcEP0MNUPO2dyzwjso=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -938,7 +934,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@@ -43,9 +43,6 @@
}, },
"status": { "status": {
"unwantedReferences": { "unwantedReferences": {
"github.com/getsentry/raven-go": [
"go.etcd.io/etcd/raft/v3"
],
"github.com/go-kit/kit": [ "github.com/go-kit/kit": [
"github.com/grpc-ecosystem/go-grpc-middleware" "github.com/grpc-ecosystem/go-grpc-middleware"
], ],
@@ -92,7 +89,6 @@
"github.com/go-logr/zapr", "github.com/go-logr/zapr",
"github.com/google/cadvisor", "github.com/google/cadvisor",
"github.com/grpc-ecosystem/go-grpc-middleware", "github.com/grpc-ecosystem/go-grpc-middleware",
"go.etcd.io/etcd/raft/v3",
"go.uber.org/zap", "go.uber.org/zap",
"gotest.tools/v3", "gotest.tools/v3",
"k8s.io/system-validators", "k8s.io/system-validators",

View File

@@ -15,8 +15,8 @@ require (
github.com/spf13/cobra v1.7.0 github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5 github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.2 github.com/stretchr/testify v1.8.2
go.etcd.io/etcd/client/pkg/v3 v3.5.7 go.etcd.io/etcd/client/pkg/v3 v3.5.8
go.etcd.io/etcd/client/v3 v3.5.7 go.etcd.io/etcd/client/v3 v3.5.8
go.opentelemetry.io/otel v1.10.0 go.opentelemetry.io/otel v1.10.0
go.opentelemetry.io/otel/trace v1.10.0 go.opentelemetry.io/otel/trace v1.10.0
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21
@@ -88,12 +88,12 @@ require (
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
go.etcd.io/bbolt v1.3.6 // indirect go.etcd.io/bbolt v1.3.7 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/api/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/v2 v2.305.7 // indirect go.etcd.io/etcd/client/v2 v2.305.8 // indirect
go.etcd.io/etcd/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/pkg/v3 v3.5.8 // indirect
go.etcd.io/etcd/raft/v3 v3.5.7 // indirect go.etcd.io/etcd/raft/v3 v3.5.8 // indirect
go.etcd.io/etcd/server/v3 v3.5.7 // indirect go.etcd.io/etcd/server/v3 v3.5.8 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect

View File

@@ -58,7 +58,6 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@@ -75,9 +74,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -106,7 +103,6 @@ github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBd
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -352,22 +348,22 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c=
go.etcd.io/etcd/client/v2 v2.305.7/go.mod h1:GQGT5Z3TBuAQGvgPfhR7VPySu/SudxmEkRq9BgzFU6s= go.etcd.io/etcd/client/v2 v2.305.8/go.mod h1:ZlAsxDK5/10I6xVHhFo9zinCMr/DDLKFetDDXlzKwqE=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU=
go.etcd.io/etcd/pkg/v3 v3.5.7/go.mod h1:kcOfWt3Ov9zgYdOiJ/o1Y9zFfLhQjylTgL4Lru8opRo= go.etcd.io/etcd/pkg/v3 v3.5.8/go.mod h1:C17MJkZHJIyJV+wWWx6Jz6YS6BfdkOnUkSwT9uuEO7s=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4=
go.etcd.io/etcd/raft/v3 v3.5.7/go.mod h1:TflkAb/8Uy6JFBxcRaH2Fr6Slm9mCPVdI2efzxY96yU= go.etcd.io/etcd/raft/v3 v3.5.8/go.mod h1:W6P5WxtOMfYNdLSEJX3vc8Pg6LOt+ewI9UCFKcnIexA=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= go.etcd.io/etcd/server/v3 v3.5.8 h1:eK9fU6Pd6IJD1k0u4zAq1NZsSsEOOimlP3kIkpcQrho=
go.etcd.io/etcd/server/v3 v3.5.7/go.mod h1:gxBgT84issUVBRpZ3XkW1T55NjOb4vZZRI4wVvNhf4A= go.etcd.io/etcd/server/v3 v3.5.8/go.mod h1:1y2ahPOrlE4pzVV5+rBCDur7QQcEP0MNUPO2dyzwjso=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -537,7 +533,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@@ -20,10 +20,10 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/spf13/pflag v1.0.5 github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.2 github.com/stretchr/testify v1.8.2
go.etcd.io/etcd/api/v3 v3.5.7 go.etcd.io/etcd/api/v3 v3.5.8
go.etcd.io/etcd/client/pkg/v3 v3.5.7 go.etcd.io/etcd/client/pkg/v3 v3.5.8
go.etcd.io/etcd/client/v3 v3.5.7 go.etcd.io/etcd/client/v3 v3.5.8
go.etcd.io/etcd/server/v3 v3.5.7 go.etcd.io/etcd/server/v3 v3.5.8
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1
go.opentelemetry.io/otel v1.10.0 go.opentelemetry.io/otel v1.10.0
@@ -104,10 +104,10 @@ require (
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
go.etcd.io/bbolt v1.3.6 // indirect go.etcd.io/bbolt v1.3.7 // indirect
go.etcd.io/etcd/client/v2 v2.305.7 // indirect go.etcd.io/etcd/client/v2 v2.305.8 // indirect
go.etcd.io/etcd/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/pkg/v3 v3.5.8 // indirect
go.etcd.io/etcd/raft/v3 v3.5.7 // indirect go.etcd.io/etcd/raft/v3 v3.5.8 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
go.opentelemetry.io/otel/metric v0.31.0 // indirect go.opentelemetry.io/otel/metric v0.31.0 // indirect

View File

@@ -70,7 +70,6 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@@ -88,9 +87,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=
github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
@@ -123,7 +120,6 @@ github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBd
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -388,22 +384,22 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c=
go.etcd.io/etcd/client/v2 v2.305.7/go.mod h1:GQGT5Z3TBuAQGvgPfhR7VPySu/SudxmEkRq9BgzFU6s= go.etcd.io/etcd/client/v2 v2.305.8/go.mod h1:ZlAsxDK5/10I6xVHhFo9zinCMr/DDLKFetDDXlzKwqE=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU=
go.etcd.io/etcd/pkg/v3 v3.5.7/go.mod h1:kcOfWt3Ov9zgYdOiJ/o1Y9zFfLhQjylTgL4Lru8opRo= go.etcd.io/etcd/pkg/v3 v3.5.8/go.mod h1:C17MJkZHJIyJV+wWWx6Jz6YS6BfdkOnUkSwT9uuEO7s=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4=
go.etcd.io/etcd/raft/v3 v3.5.7/go.mod h1:TflkAb/8Uy6JFBxcRaH2Fr6Slm9mCPVdI2efzxY96yU= go.etcd.io/etcd/raft/v3 v3.5.8/go.mod h1:W6P5WxtOMfYNdLSEJX3vc8Pg6LOt+ewI9UCFKcnIexA=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= go.etcd.io/etcd/server/v3 v3.5.8 h1:eK9fU6Pd6IJD1k0u4zAq1NZsSsEOOimlP3kIkpcQrho=
go.etcd.io/etcd/server/v3 v3.5.7/go.mod h1:gxBgT84issUVBRpZ3XkW1T55NjOb4vZZRI4wVvNhf4A= go.etcd.io/etcd/server/v3 v3.5.8/go.mod h1:1y2ahPOrlE4pzVV5+rBCDur7QQcEP0MNUPO2dyzwjso=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -596,7 +592,6 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@@ -66,10 +66,10 @@ func NewTestConfig(t testing.TB) *embed.Config {
clientURL := url.URL{Scheme: "http", Host: net.JoinHostPort("localhost", strconv.Itoa(ports[0]))} clientURL := url.URL{Scheme: "http", Host: net.JoinHostPort("localhost", strconv.Itoa(ports[0]))}
peerURL := url.URL{Scheme: "http", Host: net.JoinHostPort("localhost", strconv.Itoa(ports[1]))} peerURL := url.URL{Scheme: "http", Host: net.JoinHostPort("localhost", strconv.Itoa(ports[1]))}
cfg.LPUrls = []url.URL{peerURL} cfg.ListenPeerUrls = []url.URL{peerURL}
cfg.APUrls = []url.URL{peerURL} cfg.AdvertisePeerUrls = []url.URL{peerURL}
cfg.LCUrls = []url.URL{clientURL} cfg.ListenClientUrls = []url.URL{clientURL}
cfg.ACUrls = []url.URL{clientURL} cfg.AdvertiseClientUrls = []url.URL{clientURL}
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(zaptest.NewLogger(t, zaptest.Level(zapcore.ErrorLevel)).Named("etcd-server")) cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(zaptest.NewLogger(t, zaptest.Level(zapcore.ErrorLevel)).Named("etcd-server"))
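This hunk switches the test helper to the longer embed.Config field names (ListenPeerUrls, AdvertisePeerUrls, ListenClientUrls, AdvertiseClientUrls) provided by the v3.5.8 embed package, replacing the older short names LPUrls/APUrls/LCUrls/ACUrls. A minimal, self-contained sketch of starting an embedded etcd with the renamed fields (assuming go.etcd.io/etcd/server/v3 v3.5.8; the data directory and ports are hypothetical):

    package main

    import (
        "log"
        "net/url"

        "go.etcd.io/etcd/server/v3/embed"
    )

    func main() {
        cfg := embed.NewConfig()
        cfg.Dir = "/tmp/etcd-example" // hypothetical data directory

        clientURL := url.URL{Scheme: "http", Host: "localhost:2379"}
        peerURL := url.URL{Scheme: "http", Host: "localhost:2380"}

        // Renamed fields (formerly LPUrls/APUrls/LCUrls/ACUrls).
        cfg.ListenPeerUrls = []url.URL{peerURL}
        cfg.AdvertisePeerUrls = []url.URL{peerURL}
        cfg.ListenClientUrls = []url.URL{clientURL}
        cfg.AdvertiseClientUrls = []url.URL{clientURL}
        cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)

        e, err := embed.StartEtcd(cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer e.Close()
        <-e.Server.ReadyNotify()
        log.Println("embedded etcd is ready")
    }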

View File

@@ -62,11 +62,11 @@ func TestTLSConnection(t *testing.T) {
KeyFile: keyFile, KeyFile: keyFile,
TrustedCAFile: caFile, TrustedCAFile: caFile,
} }
for i := range etcdConfig.LCUrls { for i := range etcdConfig.ListenClientUrls {
etcdConfig.LCUrls[i].Scheme = "https" etcdConfig.ListenClientUrls[i].Scheme = "https"
} }
for i := range etcdConfig.ACUrls { for i := range etcdConfig.AdvertiseClientUrls {
etcdConfig.ACUrls[i].Scheme = "https" etcdConfig.AdvertiseClientUrls[i].Scheme = "https"
} }
client := testserver.RunEtcd(t, etcdConfig) client := testserver.RunEtcd(t, etcdConfig)

View File

@@ -69,9 +69,9 @@ require (
github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/api/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect go.etcd.io/etcd/client/v3 v3.5.8 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect go.opentelemetry.io/otel v1.10.0 // indirect

View File

@@ -339,17 +339,17 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= go.etcd.io/etcd/server/v3 v3.5.8 h1:eK9fU6Pd6IJD1k0u4zAq1NZsSsEOOimlP3kIkpcQrho=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=

View File

@@ -65,9 +65,9 @@ require (
github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect
github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/cobra v1.7.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/api/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect go.etcd.io/etcd/client/v3 v3.5.8 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect go.opentelemetry.io/otel v1.10.0 // indirect

View File

@@ -333,17 +333,17 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= go.etcd.io/etcd/server/v3 v3.5.8 h1:eK9fU6Pd6IJD1k0u4zAq1NZsSsEOOimlP3kIkpcQrho=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=

View File

@@ -69,9 +69,9 @@ require (
github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/api/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect go.etcd.io/etcd/client/v3 v3.5.8 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect go.opentelemetry.io/otel v1.10.0 // indirect

View File

@@ -337,17 +337,17 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= go.etcd.io/etcd/server/v3 v3.5.8 h1:eK9fU6Pd6IJD1k0u4zAq1NZsSsEOOimlP3kIkpcQrho=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=

View File

@@ -66,9 +66,9 @@ require (
github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/api/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect go.etcd.io/etcd/client/v3 v3.5.8 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect go.opentelemetry.io/otel v1.10.0 // indirect

View File

@@ -334,17 +334,17 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= go.etcd.io/etcd/server/v3 v3.5.8 h1:eK9fU6Pd6IJD1k0u4zAq1NZsSsEOOimlP3kIkpcQrho=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=

View File

@@ -63,9 +63,9 @@ require (
github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/api/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.8 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect go.etcd.io/etcd/client/v3 v3.5.8 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect go.opentelemetry.io/otel v1.10.0 // indirect

View File

@@ -334,17 +334,17 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
- go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+ go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
- go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
+ go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
- go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
+ go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
- go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
+ go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
- go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
+ go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
- go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU=
+ go.etcd.io/etcd/client/v2 v2.305.8 h1:IGp9Ozt8awy3qRTXSIYJd/o/cr4oUyrm9MF1RJ2dr/c=
- go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
+ go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
- go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
+ go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
- go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0=
+ go.etcd.io/etcd/pkg/v3 v3.5.8 h1:hz6w5Cb4p7dbt642m8Y35Ts9yWPWUCymc3v4Z/aiGEU=
- go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc=
+ go.etcd.io/etcd/raft/v3 v3.5.8 h1:wM4IAfiY1+vrCAkUicIOzkyjpV9MawnAul2KvxeMgy4=
- go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
+ go.etcd.io/etcd/server/v3 v3.5.8 h1:eK9fU6Pd6IJD1k0u4zAq1NZsSsEOOimlP3kIkpcQrho=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
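
For orientation, the modules bumped above (api/v3, client/pkg/v3, client/v3 at v3.5.8) form the etcd client stack Kubernetes links against. A minimal, hypothetical sketch of that client in use; the endpoint and key are placeholders, not taken from this change:

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Endpoint and key are placeholders; any etcd 3.5.x server works.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if _, err := cli.Put(ctx, "/demo/key", "value"); err != nil {
		panic(err)
	}
	resp, err := cli.Get(ctx, "/demo/key")
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
	}
}
```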

vendor/go.etcd.io/bbolt/.gitignore generated vendored

@@ -3,5 +3,8 @@
*.swp *.swp
/bin/ /bin/
cover.out cover.out
cover-*.out
/.idea /.idea
*.iml *.iml
/cmd/bbolt/bbolt

vendor/go.etcd.io/bbolt/.travis.yml generated vendored

@@ -1,18 +0,0 @@
language: go
go_import_path: go.etcd.io/bbolt
sudo: false
go:
- 1.15
before_install:
- go get -v golang.org/x/sys/unix
- go get -v honnef.co/go/tools/...
- go get -v github.com/kisielk/errcheck
script:
- make fmt
- make test
- make race
# - make errcheck

vendor/go.etcd.io/bbolt/Makefile generated vendored

@@ -2,35 +2,62 @@ BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD` COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
race: TESTFLAGS_RACE=-race=false
@TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" ifdef ENABLE_RACE
@echo "array freelist test" TESTFLAGS_RACE=-race=true
@TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)" endif
TESTFLAGS_CPU=
ifdef CPU
TESTFLAGS_CPU=-cpu=$(CPU)
endif
TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS)
.PHONY: fmt
fmt: fmt:
!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
# go get honnef.co/go/tools/simple .PHONY: lint
gosimple: lint:
gosimple ./... golangci-lint run ./...
# go get honnef.co/go/tools/unused
unused:
unused ./...
# go get github.com/kisielk/errcheck
errcheck:
@errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
.PHONY: test
test: test:
TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic @echo "hashmap freelist test"
# Note: gets "program not an importable package" in out of path builds TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m
TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt
@echo "array freelist test" @echo "array freelist test"
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt
@TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic .PHONY: coverage
# Note: gets "program not an importable package" in out of path builds coverage:
@TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt @echo "hashmap freelist test"
TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \
-coverprofile cover-freelist-hashmap.out -covermode atomic
@echo "array freelist test"
TEST_FREELIST_TYPE=array go test -v -timeout 30m \
-coverprofile cover-freelist-array.out -covermode atomic
.PHONY: gofail-enable
gofail-enable: install-gofail
gofail enable .
.PHONY: gofail-disable
gofail-disable:
gofail disable .
.PHONY: install-gofail
install-gofail:
go install go.etcd.io/gofail
.PHONY: test-failpoint
test-failpoint:
@echo "[failpoint] hashmap freelist test"
TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
@echo "[failpoint] array freelist test"
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
.PHONY: race fmt errcheck test gosimple unused

vendor/go.etcd.io/bbolt/README.md generated vendored

@@ -26,7 +26,7 @@ and setting values. That's it.
[gh_ben]: https://github.com/benbjohnson [gh_ben]: https://github.com/benbjohnson
[bolt]: https://github.com/boltdb/bolt [bolt]: https://github.com/boltdb/bolt
[hyc_symas]: https://twitter.com/hyc_symas [hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/ [lmdb]: https://www.symas.com/symas-embedded-database-lmdb
## Project Status ## Project Status
@@ -78,14 +78,23 @@ New minor versions may add additional features to the API.
### Installing ### Installing
To start using Bolt, install Go and run `go get`: To start using Bolt, install Go and run `go get`:
```sh ```sh
$ go get go.etcd.io/bbolt/... $ go get go.etcd.io/bbolt@latest
``` ```
This will retrieve the library and install the `bolt` command line utility into This will retrieve the library and update your `go.mod` and `go.sum` files.
your `$GOBIN` path.
To run the command line utility, execute:
```sh
$ go run go.etcd.io/bbolt/cmd/bbolt@latest
```
Run `go install` to install the `bbolt` command line utility into
your `$GOBIN` path, which defaults to `$GOPATH/bin` or `$HOME/go/bin` if the
`GOPATH` environment variable is not set.
```sh
$ go install go.etcd.io/bbolt/cmd/bbolt@latest
```
### Importing bbolt ### Importing bbolt
@@ -933,7 +942,7 @@ Below is a list of public, open source projects that use Bolt:
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies * [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. * [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [Key Value Access Langusge (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. * [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. * [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
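
The revised install instructions amount to adding the module with `go get` and importing it. A minimal sketch of the resulting usage; the file, bucket, and key names are purely illustrative:

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open (or create) a database file; 0600 restricts it to the current user.
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: create a bucket and store one key.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("settings"))
		if err != nil {
			return err
		}
		return b.Put([]byte("greeting"), []byte("hello"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read-only transaction.
	if err := db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("settings")).Get([]byte("greeting"))
		fmt.Printf("greeting = %s\n", v)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```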


@@ -1,3 +1,4 @@
//go:build arm64
// +build arm64

package bbolt

vendor/go.etcd.io/bbolt/bolt_loong64.go generated vendored Normal file

@@ -0,0 +1,10 @@
//go:build loong64
// +build loong64
package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF


@@ -1,3 +1,4 @@
//go:build mips64 || mips64le
// +build mips64 mips64le

package bbolt


@@ -1,3 +1,4 @@
//go:build mips || mipsle
// +build mips mipsle

package bbolt


@@ -1,3 +1,4 @@
//go:build ppc
// +build ppc

package bbolt


@@ -1,3 +1,4 @@
//go:build ppc64
// +build ppc64

package bbolt


@@ -1,3 +1,4 @@
//go:build ppc64le
// +build ppc64le

package bbolt


@@ -1,3 +1,4 @@
//go:build riscv64
// +build riscv64

package bbolt


@@ -1,3 +1,4 @@
//go:build s390x
// +build s390x

package bbolt


@@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !solaris && !aix
// +build !windows,!plan9,!solaris,!aix

package bbolt


@@ -1,3 +1,4 @@
//go:build aix
// +build aix

package bbolt


@@ -6,40 +6,10 @@ import (
"syscall" "syscall"
"time" "time"
"unsafe" "unsafe"
"golang.org/x/sys/windows"
) )
// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)
const (
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2
flagLockFailImmediately = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21
)
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
if r == 0 {
return err
}
return nil
}
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
if r == 0 {
return err
}
return nil
}
// fdatasync flushes written data to a file descriptor. // fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error { func fdatasync(db *DB) error {
return db.file.Sync() return db.file.Sync()
@@ -51,22 +21,22 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {
if timeout != 0 { if timeout != 0 {
t = time.Now() t = time.Now()
} }
var flag uint32 = flagLockFailImmediately var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY
if exclusive { if exclusive {
flag |= flagLockExclusive flags |= windows.LOCKFILE_EXCLUSIVE_LOCK
} }
for { for {
// Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
// -1..0 as the lock on the database file. // -1..0 as the lock on the database file.
var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{ err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{
Offset: m1, Offset: m1,
OffsetHigh: m1, OffsetHigh: m1,
}) })
if err == nil { if err == nil {
return nil return nil
} else if err != errLockViolation { } else if err != windows.ERROR_LOCK_VIOLATION {
return err return err
} }
@@ -83,34 +53,37 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {
// funlock releases an advisory lock on a file descriptor. // funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error { func funlock(db *DB) error {
var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{ return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{
Offset: m1, Offset: m1,
OffsetHigh: m1, OffsetHigh: m1,
}) })
return err
} }
// mmap memory maps a DB's data file. // mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go // Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error { func mmap(db *DB, sz int) error {
var sizelo, sizehi uint32
if !db.readOnly { if !db.readOnly {
// Truncate the database to the size of the mmap. // Truncate the database to the size of the mmap.
if err := db.file.Truncate(int64(sz)); err != nil { if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("truncate: %s", err) return fmt.Errorf("truncate: %s", err)
} }
sizehi = uint32(sz >> 32)
sizelo = uint32(sz) & 0xffffffff
} }
// Open a file mapping handle. // Open a file mapping handle.
sizelo := uint32(sz >> 32) h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
sizehi := uint32(sz) & 0xffffffff
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
if h == 0 { if h == 0 {
return os.NewSyscallError("CreateFileMapping", errno) return os.NewSyscallError("CreateFileMapping", errno)
} }
// Create the memory map. // Create the memory map.
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
if addr == 0 { if addr == 0 {
// Do our best and report error returned from MapViewOfFile.
_ = syscall.CloseHandle(h)
return os.NewSyscallError("MapViewOfFile", errno) return os.NewSyscallError("MapViewOfFile", errno)
} }
@@ -134,8 +107,11 @@ func munmap(db *DB) error {
} }
addr := (uintptr)(unsafe.Pointer(&db.data[0])) addr := (uintptr)(unsafe.Pointer(&db.data[0]))
var err1 error
if err := syscall.UnmapViewOfFile(addr); err != nil { if err := syscall.UnmapViewOfFile(addr); err != nil {
return os.NewSyscallError("UnmapViewOfFile", err) err1 = os.NewSyscallError("UnmapViewOfFile", err)
} }
return nil db.data = nil
db.datasz = 0
return err1
} }
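
The hunk above drops the hand-rolled kernel32 bindings in favour of golang.org/x/sys/windows. As a rough, Windows-only sketch of the same non-blocking exclusive byte-range lock (standalone code, not the vendored implementation):

```go
//go:build windows

package winlock

import (
	"os"

	"golang.org/x/sys/windows"
)

// tryLockExclusive attempts a non-blocking exclusive lock on the byte range
// -1..0 of f, mirroring the approach the vendored flock code now uses.
func tryLockExclusive(f *os.File) error {
	flags := uint32(windows.LOCKFILE_FAIL_IMMEDIATELY | windows.LOCKFILE_EXCLUSIVE_LOCK)
	var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
	return windows.LockFileEx(windows.Handle(f.Fd()), flags, 0, 1, 0, &windows.Overlapped{
		Offset:     m1,
		OffsetHigh: m1,
	})
}
```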


@@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !linux && !openbsd
// +build !windows,!plan9,!linux,!openbsd

package bbolt

vendor/go.etcd.io/bbolt/bucket.go generated vendored

@@ -81,7 +81,7 @@ func (b *Bucket) Writable() bool {
// Do not use a cursor after the transaction is closed. // Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor { func (b *Bucket) Cursor() *Cursor {
// Update transaction statistics. // Update transaction statistics.
b.tx.stats.CursorCount++ b.tx.stats.IncCursorCount(1)
// Allocate and return a cursor. // Allocate and return a cursor.
return &Cursor{ return &Cursor{
@@ -229,11 +229,9 @@ func (b *Bucket) DeleteBucket(key []byte) error {
// Recursively delete all child buckets. // Recursively delete all child buckets.
child := b.Bucket(key) child := b.Bucket(key)
err := child.ForEach(func(k, v []byte) error { err := child.ForEachBucket(func(k []byte) error {
if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 { if err := child.DeleteBucket(k); err != nil {
if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err)
return fmt.Errorf("delete bucket: %s", err)
}
} }
return nil return nil
}) })
@@ -353,7 +351,7 @@ func (b *Bucket) SetSequence(v uint64) error {
_ = b.node(b.root, nil) _ = b.node(b.root, nil)
} }
// Increment and return the sequence. // Set the sequence.
b.bucket.sequence = v b.bucket.sequence = v
return nil return nil
} }
@@ -378,6 +376,7 @@ func (b *Bucket) NextSequence() (uint64, error) {
} }
// ForEach executes a function for each key/value pair in a bucket. // ForEach executes a function for each key/value pair in a bucket.
// Because ForEach uses a Cursor, the iteration over keys is in lexicographical order.
// If the provided function returns an error then the iteration is stopped and // If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify // the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior. // the bucket; this will result in undefined behavior.
@@ -394,7 +393,22 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
return nil return nil
} }
// Stat returns stats on a bucket. func (b *Bucket) ForEachBucket(fn func(k []byte) error) error {
if b.tx.db == nil {
return ErrTxClosed
}
c := b.Cursor()
for k, _, flags := c.first(); k != nil; k, _, flags = c.next() {
if flags&bucketLeafFlag != 0 {
if err := fn(k); err != nil {
return err
}
}
}
return nil
}
// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats { func (b *Bucket) Stats() BucketStats {
var s, subStats BucketStats var s, subStats BucketStats
pageSize := b.tx.db.pageSize pageSize := b.tx.db.pageSize
@@ -402,7 +416,7 @@ func (b *Bucket) Stats() BucketStats {
if b.root == 0 { if b.root == 0 {
s.InlineBucketN += 1 s.InlineBucketN += 1
} }
b.forEachPage(func(p *page, depth int) { b.forEachPage(func(p *page, depth int, pgstack []pgid) {
if (p.flags & leafPageFlag) != 0 { if (p.flags & leafPageFlag) != 0 {
s.KeyN += int(p.count) s.KeyN += int(p.count)
@@ -461,7 +475,7 @@ func (b *Bucket) Stats() BucketStats {
// Keep track of maximum page depth. // Keep track of maximum page depth.
if depth+1 > s.Depth { if depth+1 > s.Depth {
s.Depth = (depth + 1) s.Depth = depth + 1
} }
}) })
@@ -477,15 +491,15 @@ func (b *Bucket) Stats() BucketStats {
} }
// forEachPage iterates over every page in a bucket, including inline pages. // forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) { func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) {
// If we have an inline page then just use that. // If we have an inline page then just use that.
if b.page != nil { if b.page != nil {
fn(b.page, 0) fn(b.page, 0, []pgid{b.root})
return return
} }
// Otherwise traverse the page hierarchy. // Otherwise traverse the page hierarchy.
b.tx.forEachPage(b.root, 0, fn) b.tx.forEachPage(b.root, fn)
} }
// forEachPageNode iterates over every page (or node) in a bucket. // forEachPageNode iterates over every page (or node) in a bucket.
@@ -499,8 +513,8 @@ func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
b._forEachPageNode(b.root, 0, fn) b._forEachPageNode(b.root, 0, fn)
} }
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) {
var p, n = b.pageNode(pgid) var p, n = b.pageNode(pgId)
// Execute function. // Execute function.
fn(p, n, depth) fn(p, n, depth)
@@ -640,11 +654,11 @@ func (b *Bucket) rebalance() {
} }
// node creates a node from a page and associates it with a given parent. // node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node { func (b *Bucket) node(pgId pgid, parent *node) *node {
_assert(b.nodes != nil, "nodes map expected") _assert(b.nodes != nil, "nodes map expected")
// Retrieve node if it's already been created. // Retrieve node if it's already been created.
if n := b.nodes[pgid]; n != nil { if n := b.nodes[pgId]; n != nil {
return n return n
} }
@@ -659,15 +673,15 @@ func (b *Bucket) node(pgid pgid, parent *node) *node {
// Use the inline page if this is an inline bucket. // Use the inline page if this is an inline bucket.
var p = b.page var p = b.page
if p == nil { if p == nil {
p = b.tx.page(pgid) p = b.tx.page(pgId)
} }
// Read the page into the node and cache it. // Read the page into the node and cache it.
n.read(p) n.read(p)
b.nodes[pgid] = n b.nodes[pgId] = n
// Update statistics. // Update statistics.
b.tx.stats.NodeCount++ b.tx.stats.IncNodeCount(1)
return n return n
} }
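
DeleteBucket now recurses through the new ForEachBucket helper, which visits only keys that are nested buckets. A short usage sketch against the public API; the bucket name is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

// listSubBuckets prints the names of the buckets nested directly under name.
func listSubBuckets(db *bolt.DB, name string) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(name))
		if b == nil {
			return nil // parent bucket does not exist
		}
		return b.ForEachBucket(func(k []byte) error {
			fmt.Printf("sub-bucket: %s\n", k)
			return nil
		})
	})
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := listSubBuckets(db, "root"); err != nil {
		log.Fatal(err)
	}
}
```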

vendor/go.etcd.io/bbolt/compact.go generated vendored

@@ -12,7 +12,11 @@ func Compact(dst, src *DB, txMaxSize int64) error {
if err != nil { if err != nil {
return err return err
} }
defer tx.Rollback() defer func() {
if tempErr := tx.Rollback(); tempErr != nil {
err = tempErr
}
}()
if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
// On each key/value, check if we have exceeded tx size. // On each key/value, check if we have exceeded tx size.
@@ -73,8 +77,9 @@ func Compact(dst, src *DB, txMaxSize int64) error {
}); err != nil { }); err != nil {
return err return err
} }
err = tx.Commit()
return tx.Commit() return err
} }
// walkFunc is the type of the function called for keys (buckets and "normal" // walkFunc is the type of the function called for keys (buckets and "normal"
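
For context, Compact copies src into dst bucket by bucket, starting a fresh write transaction on dst whenever the running one exceeds txMaxSize bytes; the deferred Rollback above now surfaces its error instead of discarding it. A hedged usage sketch with placeholder paths:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	src, err := bolt.Open("old.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := bolt.Open("compacted.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Commit to dst roughly every 64 KiB of copied data.
	if err := bolt.Compact(dst, src, 64*1024); err != nil {
		log.Fatal(err)
	}
}
```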

vendor/go.etcd.io/bbolt/cursor.go generated vendored

@@ -6,7 +6,8 @@ import (
"sort" "sort"
) )
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. // Cursor represents an iterator that can traverse over all key/value pairs in a bucket
// in lexicographical order.
// Cursors see nested buckets with value == nil. // Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open. // Cursors can be obtained from a transaction and are valid as long as the transaction is open.
// //
@@ -30,10 +31,18 @@ func (c *Cursor) Bucket() *Bucket {
// The returned key and value are only valid for the life of the transaction. // The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) { func (c *Cursor) First() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed") _assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.first()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
func (c *Cursor) first() (key []byte, value []byte, flags uint32) {
c.stack = c.stack[:0] c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.root) p, n := c.bucket.pageNode(c.bucket.root)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
c.first() c.goToFirstElementOnTheStack()
// If we land on an empty page then move to the next value. // If we land on an empty page then move to the next value.
// https://github.com/boltdb/bolt/issues/450 // https://github.com/boltdb/bolt/issues/450
@@ -43,10 +52,9 @@ func (c *Cursor) First() (key []byte, value []byte) {
k, v, flags := c.keyValue() k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 { if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil return k, nil, flags
} }
return k, v return k, v, flags
} }
// Last moves the cursor to the last item in the bucket and returns its key and value. // Last moves the cursor to the last item in the bucket and returns its key and value.
@@ -60,6 +68,17 @@ func (c *Cursor) Last() (key []byte, value []byte) {
ref.index = ref.count() - 1 ref.index = ref.count() - 1
c.stack = append(c.stack, ref) c.stack = append(c.stack, ref)
c.last() c.last()
// If this is an empty page (calling Delete may result in empty pages)
// we call prev to find the last page that is not empty
for len(c.stack) > 0 && c.stack[len(c.stack)-1].count() == 0 {
c.prev()
}
if len(c.stack) == 0 {
return nil, nil
}
k, v, flags := c.keyValue() k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 { if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil return k, nil
@@ -84,37 +103,20 @@ func (c *Cursor) Next() (key []byte, value []byte) {
// The returned key and value are only valid for the life of the transaction. // The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) { func (c *Cursor) Prev() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed") _assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.prev()
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
for i := len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index > 0 {
elem.index--
break
}
c.stack = c.stack[:i]
}
// If we've hit the end then return nil.
if len(c.stack) == 0 {
return nil, nil
}
// Move down the stack to find the last element of the last leaf under this branch.
c.last()
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 { if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil return k, nil
} }
return k, v return k, v
} }
// Seek moves the cursor to a given key and returns it. // Seek moves the cursor to a given key using a b-tree search and returns it.
// If the key does not exist then the next key is used. If no keys // If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned. // follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction. // The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.seek(seek) k, v, flags := c.seek(seek)
// If we ended up after the last element of a page then move to the next one. // If we ended up after the last element of a page then move to the next one.
@@ -152,8 +154,6 @@ func (c *Cursor) Delete() error {
// seek moves the cursor to a given key and returns it. // seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. // If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Start from root page/node and traverse to correct page. // Start from root page/node and traverse to correct page.
c.stack = c.stack[:0] c.stack = c.stack[:0]
c.search(seek, c.bucket.root) c.search(seek, c.bucket.root)
@@ -163,7 +163,7 @@ func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
} }
// first moves the cursor to the first leaf element under the last page in the stack. // first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) first() { func (c *Cursor) goToFirstElementOnTheStack() {
for { for {
// Exit when we hit a leaf page. // Exit when we hit a leaf page.
var ref = &c.stack[len(c.stack)-1] var ref = &c.stack[len(c.stack)-1]
@@ -172,13 +172,13 @@ func (c *Cursor) first() {
} }
// Keep adding pages pointing to the first element to the stack. // Keep adding pages pointing to the first element to the stack.
var pgid pgid var pgId pgid
if ref.node != nil { if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid pgId = ref.node.inodes[ref.index].pgid
} else { } else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid pgId = ref.page.branchPageElement(uint16(ref.index)).pgid
} }
p, n := c.bucket.pageNode(pgid) p, n := c.bucket.pageNode(pgId)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
} }
} }
@@ -193,13 +193,13 @@ func (c *Cursor) last() {
} }
// Keep adding pages pointing to the last element in the stack. // Keep adding pages pointing to the last element in the stack.
var pgid pgid var pgId pgid
if ref.node != nil { if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid pgId = ref.node.inodes[ref.index].pgid
} else { } else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid pgId = ref.page.branchPageElement(uint16(ref.index)).pgid
} }
p, n := c.bucket.pageNode(pgid) p, n := c.bucket.pageNode(pgId)
var nextRef = elemRef{page: p, node: n} var nextRef = elemRef{page: p, node: n}
nextRef.index = nextRef.count() - 1 nextRef.index = nextRef.count() - 1
@@ -231,7 +231,7 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
// Otherwise start from where we left off in the stack and find the // Otherwise start from where we left off in the stack and find the
// first element of the first leaf page. // first element of the first leaf page.
c.stack = c.stack[:i+1] c.stack = c.stack[:i+1]
c.first() c.goToFirstElementOnTheStack()
// If this is an empty page then restart and move back up the stack. // If this is an empty page then restart and move back up the stack.
// https://github.com/boltdb/bolt/issues/450 // https://github.com/boltdb/bolt/issues/450
@@ -243,9 +243,33 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
} }
} }
// prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
func (c *Cursor) prev() (key []byte, value []byte, flags uint32) {
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
for i := len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index > 0 {
elem.index--
break
}
c.stack = c.stack[:i]
}
// If we've hit the end then return nil.
if len(c.stack) == 0 {
return nil, nil, 0
}
// Move down the stack to find the last element of the last leaf under this branch.
c.last()
return c.keyValue()
}
// search recursively performs a binary search against a given page/node until it finds a given key. // search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) { func (c *Cursor) search(key []byte, pgId pgid) {
p, n := c.bucket.pageNode(pgid) p, n := c.bucket.pageNode(pgId)
if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
} }
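
The cursor refactor (first and prev split into internal helpers, Last skipping empty pages) does not change the public iteration contract: keys come back in lexicographical order. A small sketch of forward, reverse, and seek-based iteration over a hypothetical bucket:

```go
package cursordemo

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// dumpBucket walks a bucket forward, backward, and via Seek, inside one
// read-only transaction. Bucket and key names are illustrative.
func dumpBucket(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("settings"))
		if b == nil {
			return nil
		}
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("asc  %s=%s\n", k, v)
		}
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			fmt.Printf("desc %s=%s\n", k, v)
		}
		// Seek jumps to the first key >= the given prefix.
		if k, v := c.Seek([]byte("gr")); k != nil {
			fmt.Printf("seek %s=%s\n", k, v)
		}
		return nil
	})
}
```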

vendor/go.etcd.io/bbolt/db.go generated vendored

@@ -4,7 +4,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"hash/fnv" "hash/fnv"
"log" "io"
"os" "os"
"runtime" "runtime"
"sort" "sort"
@@ -81,7 +81,7 @@ type DB struct {
NoFreelistSync bool NoFreelistSync bool
// FreelistType sets the backend freelist type. There are two options. Array which is simple but endures // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
// dramatic performance degradation if database is large and framentation in freelist is common. // dramatic performance degradation if database is large and fragmentation in freelist is common.
// The alternative one is using hashmap, it is faster in almost all circumstances // The alternative one is using hashmap, it is faster in almost all circumstances
// but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
// The default type is array // The default type is array
@@ -95,6 +95,11 @@ type DB struct {
// https://github.com/boltdb/bolt/issues/284 // https://github.com/boltdb/bolt/issues/284
NoGrowSync bool NoGrowSync bool
// When `true`, bbolt will always load the free pages when opening the DB.
// When opening db in write mode, this flag will always automatically
// set to `true`.
PreLoadFreelist bool
// If you want to read the entire database fast, you can set MmapFlag to // If you want to read the entire database fast, you can set MmapFlag to
// syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
MmapFlags int MmapFlags int
@@ -129,6 +134,9 @@ type DB struct {
path string path string
openFile func(string, int, os.FileMode) (*os.File, error) openFile func(string, int, os.FileMode) (*os.File, error)
file *os.File file *os.File
// `dataref` isn't used at all on Windows, and the golangci-lint
// always fails on Windows platform.
//nolint
dataref []byte // mmap'ed readonly, write throws SEGV dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte data *[maxMapSize]byte
datasz int datasz int
@@ -193,6 +201,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
db.NoGrowSync = options.NoGrowSync db.NoGrowSync = options.NoGrowSync
db.MmapFlags = options.MmapFlags db.MmapFlags = options.MmapFlags
db.NoFreelistSync = options.NoFreelistSync db.NoFreelistSync = options.NoFreelistSync
db.PreLoadFreelist = options.PreLoadFreelist
db.FreelistType = options.FreelistType db.FreelistType = options.FreelistType
db.Mlock = options.Mlock db.Mlock = options.Mlock
@@ -205,6 +214,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
if options.ReadOnly { if options.ReadOnly {
flag = os.O_RDONLY flag = os.O_RDONLY
db.readOnly = true db.readOnly = true
} else {
// always load free pages in write mode
db.PreLoadFreelist = true
} }
db.openFile = options.OpenFile db.openFile = options.OpenFile
@@ -252,21 +264,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
return nil, err return nil, err
} }
} else { } else {
// Read the first meta page to determine the page size. // try to get the page size from the metadata pages
var buf [0x1000]byte if pgSize, err := db.getPageSize(); err == nil {
// If we can't read the page size, but can read a page, assume db.pageSize = pgSize
// it's the same as the OS or one given -- since that's how the
// page size was chosen in the first place.
//
// If the first page is invalid and this OS uses a different
// page size than what the database was created with then we
// are out of luck and cannot access the database.
//
// TODO: scan for next page
if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
db.pageSize = int(m.pageSize)
}
} else { } else {
_ = db.close() _ = db.close()
return nil, ErrInvalid return nil, ErrInvalid
@@ -286,12 +286,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
return nil, err return nil, err
} }
if db.PreLoadFreelist {
db.loadFreelist()
}
if db.readOnly { if db.readOnly {
return db, nil return db, nil
} }
db.loadFreelist()
// Flush freelist when transitioning from no sync to sync so // Flush freelist when transitioning from no sync to sync so
// NoFreelistSync unaware boltdb can open the db later. // NoFreelistSync unaware boltdb can open the db later.
if !db.NoFreelistSync && !db.hasSyncedFreelist() { if !db.NoFreelistSync && !db.hasSyncedFreelist() {
@@ -309,6 +311,96 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
return db, nil return db, nil
} }
// getPageSize reads the pageSize from the meta pages. It tries
// to read the first meta page firstly. If the first page is invalid,
// then it tries to read the second page using the default page size.
func (db *DB) getPageSize() (int, error) {
var (
meta0CanRead, meta1CanRead bool
)
// Read the first meta page to determine the page size.
if pgSize, canRead, err := db.getPageSizeFromFirstMeta(); err != nil {
// We cannot read the page size from page 0, but can read page 0.
meta0CanRead = canRead
} else {
return pgSize, nil
}
// Read the second meta page to determine the page size.
if pgSize, canRead, err := db.getPageSizeFromSecondMeta(); err != nil {
// We cannot read the page size from page 1, but can read page 1.
meta1CanRead = canRead
} else {
return pgSize, nil
}
// If we can't read the page size from both pages, but can read
// either page, then we assume it's the same as the OS or the one
// given, since that's how the page size was chosen in the first place.
//
// If both pages are invalid, and (this OS uses a different page size
// from what the database was created with or the given page size is
// different from what the database was created with), then we are out
// of luck and cannot access the database.
if meta0CanRead || meta1CanRead {
return db.pageSize, nil
}
return 0, ErrInvalid
}
// getPageSizeFromFirstMeta reads the pageSize from the first meta page
func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) {
var buf [0x1000]byte
var metaCanRead bool
if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
metaCanRead = true
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
return int(m.pageSize), metaCanRead, nil
}
}
return 0, metaCanRead, ErrInvalid
}
// getPageSizeFromSecondMeta reads the pageSize from the second meta page
func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) {
var (
fileSize int64
metaCanRead bool
)
// get the db file size
if info, err := db.file.Stat(); err != nil {
return 0, metaCanRead, err
} else {
fileSize = info.Size()
}
// We need to read the second meta page, so we should skip the first page;
// but we don't know the exact page size yet, it's chicken & egg problem.
// The solution is to try all the possible page sizes, which starts from 1KB
// and until 16MB (1024<<14) or the end of the db file
//
// TODO: should we support larger page size?
for i := 0; i <= 14; i++ {
var buf [0x1000]byte
var pos int64 = 1024 << uint(i)
if pos >= fileSize-1024 {
break
}
bw, err := db.file.ReadAt(buf[:], pos)
if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) {
metaCanRead = true
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
return int(m.pageSize), metaCanRead, nil
}
}
}
return 0, metaCanRead, ErrInvalid
}
// loadFreelist reads the freelist if it is synced, or reconstructs it // loadFreelist reads the freelist if it is synced, or reconstructs it
// by scanning the DB if it is not synced. It assumes there are no // by scanning the DB if it is not synced. It assumes there are no
// concurrent accesses being made to the freelist. // concurrent accesses being made to the freelist.
@@ -372,6 +464,8 @@ func (db *DB) mmap(minsz int) error {
} }
// Memory-map the data file as a byte slice. // Memory-map the data file as a byte slice.
// gofail: var mapError string
// return errors.New(mapError)
if err := mmap(db, size); err != nil { if err := mmap(db, size); err != nil {
return err return err
} }
@@ -399,11 +493,25 @@ func (db *DB) mmap(minsz int) error {
return nil return nil
} }
func (db *DB) invalidate() {
db.dataref = nil
db.data = nil
db.datasz = 0
db.meta0 = nil
db.meta1 = nil
}
// munmap unmaps the data file from memory. // munmap unmaps the data file from memory.
func (db *DB) munmap() error { func (db *DB) munmap() error {
defer db.invalidate()
// gofail: var unmapError string
// return errors.New(unmapError)
if err := munmap(db); err != nil { if err := munmap(db); err != nil {
return fmt.Errorf("unmap error: " + err.Error()) return fmt.Errorf("unmap error: " + err.Error())
} }
return nil return nil
} }
@@ -552,7 +660,7 @@ func (db *DB) close() error {
if !db.readOnly { if !db.readOnly {
// Unlock the file. // Unlock the file.
if err := funlock(db); err != nil { if err := funlock(db); err != nil {
log.Printf("bolt.Close(): funlock error: %s", err) return fmt.Errorf("bolt.Close(): funlock error: %w", err)
} }
} }
@@ -609,6 +717,13 @@ func (db *DB) beginTx() (*Tx, error) {
return nil, ErrDatabaseNotOpen return nil, ErrDatabaseNotOpen
} }
// Exit if the database is not correctly mapped.
if db.data == nil {
db.mmaplock.RUnlock()
db.metalock.Unlock()
return nil, ErrInvalidMapping
}
// Create a transaction associated with the database. // Create a transaction associated with the database.
t := &Tx{} t := &Tx{}
t.init(db) t.init(db)
@@ -650,6 +765,12 @@ func (db *DB) beginRWTx() (*Tx, error) {
return nil, ErrDatabaseNotOpen return nil, ErrDatabaseNotOpen
} }
// Exit if the database is not correctly mapped.
if db.data == nil {
db.rwlock.Unlock()
return nil, ErrInvalidMapping
}
// Create a transaction associated with the database. // Create a transaction associated with the database.
t := &Tx{writable: true} t := &Tx{writable: true}
t.init(db) t.init(db)
@@ -924,6 +1045,7 @@ func (db *DB) Stats() Stats {
// This is for internal access to the raw data bytes from the C cursor, use // This is for internal access to the raw data bytes from the C cursor, use
// carefully, or not at all. // carefully, or not at all.
func (db *DB) Info() *Info { func (db *DB) Info() *Info {
_assert(db.data != nil, "database file isn't correctly mapped")
return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
} }
@@ -950,7 +1072,7 @@ func (db *DB) meta() *meta {
metaB = db.meta0 metaB = db.meta0
} }
// Use higher meta page if valid. Otherwise fallback to previous, if valid. // Use higher meta page if valid. Otherwise, fallback to previous, if valid.
if err := metaA.validate(); err == nil { if err := metaA.validate(); err == nil {
return metaA return metaA
} else if err := metaB.validate(); err == nil { } else if err := metaB.validate(); err == nil {
@@ -1003,7 +1125,7 @@ func (db *DB) grow(sz int) error {
// If the data is smaller than the alloc size then only allocate what's needed. // If the data is smaller than the alloc size then only allocate what's needed.
// Once it goes over the allocation size then allocate in chunks. // Once it goes over the allocation size then allocate in chunks.
if db.datasz < db.AllocSize { if db.datasz <= db.AllocSize {
sz = db.datasz sz = db.datasz
} else { } else {
sz += db.AllocSize sz += db.AllocSize
@@ -1056,9 +1178,11 @@ func (db *DB) freepages() []pgid {
panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
} }
}() }()
tx.checkBucket(&tx.root, reachable, nofreed, ech) tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech)
close(ech) close(ech)
// TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages.
var fids []pgid var fids []pgid
for i := pgid(2); i < db.meta().pgid; i++ { for i := pgid(2); i < db.meta().pgid; i++ {
if _, ok := reachable[i]; !ok { if _, ok := reachable[i]; !ok {
@@ -1082,8 +1206,13 @@ type Options struct {
// under normal operation, but requires a full database re-sync during recovery. // under normal operation, but requires a full database re-sync during recovery.
NoFreelistSync bool NoFreelistSync bool
// PreLoadFreelist sets whether to load the free pages when opening
// the db file. Note when opening db in write mode, bbolt will always
// load the free pages.
PreLoadFreelist bool
// FreelistType sets the backend freelist type. There are two options. Array which is simple but endures // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
// dramatic performance degradation if database is large and framentation in freelist is common. // dramatic performance degradation if database is large and fragmentation in freelist is common.
// The alternative one is using hashmap, it is faster in almost all circumstances // The alternative one is using hashmap, it is faster in almost all circumstances
// but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
// The default type is array // The default type is array
@@ -1187,7 +1316,7 @@ func (m *meta) validate() error {
return ErrInvalid return ErrInvalid
} else if m.version != version { } else if m.version != version {
return ErrVersionMismatch return ErrVersionMismatch
} else if m.checksum != 0 && m.checksum != m.sum64() { } else if m.checksum != m.sum64() {
return ErrChecksum return ErrChecksum
} }
return nil return nil
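
Two user-visible points from the db.go changes: the new PreLoadFreelist option lets read-only opens load the freelist (write mode always does), and a database whose data file is not mapped now fails fast with ErrInvalidMapping when a transaction is started. A sketch of opening with the new option; the file name is a placeholder:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// In read-only mode the freelist is skipped unless requested; opt in if
	// the workload needs free-page information up front.
	db, err := bolt.Open("example.db", 0600, &bolt.Options{
		ReadOnly:        true,
		PreLoadFreelist: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```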

vendor/go.etcd.io/bbolt/doc.go generated vendored

@@ -14,8 +14,7 @@ The design of Bolt is based on Howard Chu's LMDB database project.
Bolt currently works on Windows, Mac OS X, and Linux. Bolt currently works on Windows, Mac OS X, and Linux.
# Basics
Basics
There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is a collection of buckets and is represented by a single file on disk. A bucket is
@@ -27,8 +26,7 @@ iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time. is allowed at a time.
# Caveats
Caveats
The database uses a read-only, memory-mapped data file to ensure that The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database, however, this means that keys and applications cannot corrupt the database, however, this means that keys and
@@ -38,7 +36,5 @@ will cause Go to panic.
Keys and values retrieved from the database are only valid for the life of Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic. point to different data or can point to invalid memory which will cause a panic.
*/ */
package bbolt package bbolt

vendor/go.etcd.io/bbolt/errors.go generated vendored

@@ -16,6 +16,9 @@ var (
// This typically occurs when a file is not a bolt database. // This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database") ErrInvalid = errors.New("invalid database")
// ErrInvalidMapping is returned when the database file fails to get mapped.
ErrInvalidMapping = errors.New("database isn't correctly mapped")
// ErrVersionMismatch is returned when the data file was created with a // ErrVersionMismatch is returned when the data file was created with a
// different version of Bolt. // different version of Bolt.
ErrVersionMismatch = errors.New("version mismatch") ErrVersionMismatch = errors.New("version mismatch")
@@ -41,6 +44,10 @@ var (
// ErrDatabaseReadOnly is returned when a mutating transaction is started on a // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
// read-only database. // read-only database.
ErrDatabaseReadOnly = errors.New("database is in read-only mode") ErrDatabaseReadOnly = errors.New("database is in read-only mode")
// ErrFreePagesNotLoaded is returned when a readonly transaction without
// preloading the free pages is trying to access the free pages.
ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded")
) )
// These errors can occur when putting or deleting a value or a bucket. // These errors can occur when putting or deleting a value or a bucket.

vendor/go.etcd.io/bbolt/freelist.go generated vendored

@@ -24,7 +24,7 @@ type freelist struct {
ids []pgid // all free and available free page ids. ids []pgid // all free and available free page ids.
allocs map[pgid]txid // mapping of txid that allocated a pgid. allocs map[pgid]txid // mapping of txid that allocated a pgid.
pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
cache map[pgid]bool // fast lookup of all free and pending page ids. cache map[pgid]struct{} // fast lookup of all free and pending page ids.
freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
forwardMap map[pgid]uint64 // key is start pgid, value is its span size forwardMap map[pgid]uint64 // key is start pgid, value is its span size
backwardMap map[pgid]uint64 // key is end pgid, value is its span size backwardMap map[pgid]uint64 // key is end pgid, value is its span size
@@ -41,7 +41,7 @@ func newFreelist(freelistType FreelistType) *freelist {
freelistType: freelistType, freelistType: freelistType,
allocs: make(map[pgid]txid), allocs: make(map[pgid]txid),
pending: make(map[txid]*txPending), pending: make(map[txid]*txPending),
cache: make(map[pgid]bool), cache: make(map[pgid]struct{}),
freemaps: make(map[uint64]pidSet), freemaps: make(map[uint64]pidSet),
forwardMap: make(map[pgid]uint64), forwardMap: make(map[pgid]uint64),
backwardMap: make(map[pgid]uint64), backwardMap: make(map[pgid]uint64),
@@ -171,13 +171,13 @@ func (f *freelist) free(txid txid, p *page) {
for id := p.id; id <= p.id+pgid(p.overflow); id++ { for id := p.id; id <= p.id+pgid(p.overflow); id++ {
// Verify that page is not already free. // Verify that page is not already free.
if f.cache[id] { if _, ok := f.cache[id]; ok {
panic(fmt.Sprintf("page %d already freed", id)) panic(fmt.Sprintf("page %d already freed", id))
} }
// Add to the freelist and cache. // Add to the freelist and cache.
txp.ids = append(txp.ids, id) txp.ids = append(txp.ids, id)
txp.alloctx = append(txp.alloctx, allocTxid) txp.alloctx = append(txp.alloctx, allocTxid)
f.cache[id] = true f.cache[id] = struct{}{}
} }
} }
@@ -256,8 +256,9 @@ func (f *freelist) rollback(txid txid) {
} }
// freed returns whether a given page is in the free list. // freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool { func (f *freelist) freed(pgId pgid) bool {
return f.cache[pgid] _, ok := f.cache[pgId]
return ok
} }
// read initializes the freelist from a freelist page. // read initializes the freelist from a freelist page.
@@ -386,13 +387,13 @@ func (f *freelist) noSyncReload(pgids []pgid) {
// reindex rebuilds the free cache based on available and pending free lists. // reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() { func (f *freelist) reindex() {
ids := f.getFreePageIDs() ids := f.getFreePageIDs()
f.cache = make(map[pgid]bool, len(ids)) f.cache = make(map[pgid]struct{}, len(ids))
for _, id := range ids { for _, id := range ids {
f.cache[id] = true f.cache[id] = struct{}{}
} }
for _, txp := range f.pending { for _, txp := range f.pending {
for _, pendingID := range txp.ids { for _, pendingID := range txp.ids {
f.cache[pendingID] = true f.cache[pendingID] = struct{}{}
} }
} }
} }
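
The cache moves from map[pgid]bool to map[pgid]struct{}: the empty struct occupies no space, so the map behaves as a pure set and membership becomes the two-value lookup shown above. The same idiom outside bbolt, for illustration:

```go
package setdemo

// pageSet is a set of page IDs built on the zero-size struct{} value,
// mirroring the map[pgid]struct{} cache the freelist now uses.
type pageSet map[uint64]struct{}

func (s pageSet) add(id uint64) { s[id] = struct{}{} }

func (s pageSet) has(id uint64) bool {
	_, ok := s[id]
	return ok
}

func example() bool {
	s := pageSet{}
	s.add(42)
	return s.has(42) && !s.has(7)
}
```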


@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package bbolt
@@ -17,7 +18,7 @@ func mlock(db *DB, fileSize int) error {
return nil return nil
} }
//munlock unlocks memory of db file // munlock unlocks memory of db file
func munlock(db *DB, fileSize int) error { func munlock(db *DB, fileSize int) error {
if db.dataref == nil { if db.dataref == nil {
return nil return nil


@@ -5,7 +5,7 @@ func mlock(_ *DB, _ int) error {
panic("mlock is supported only on UNIX systems") panic("mlock is supported only on UNIX systems")
} }
//munlock unlocks memory of db file // munlock unlocks memory of db file
func munlock(_ *DB, _ int) error { func munlock(_ *DB, _ int) error {
panic("munlock is supported only on UNIX systems") panic("munlock is supported only on UNIX systems")
} }

vendor/go.etcd.io/bbolt/node.go generated vendored

@@ -113,9 +113,9 @@ func (n *node) prevSibling() *node {
} }
// put inserts a key/value. // put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) {
if pgid >= n.bucket.tx.meta.pgid { if pgId >= n.bucket.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid))
} else if len(oldKey) <= 0 { } else if len(oldKey) <= 0 {
panic("put: zero-length old key") panic("put: zero-length old key")
} else if len(newKey) <= 0 { } else if len(newKey) <= 0 {
@@ -136,7 +136,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
inode.flags = flags inode.flags = flags
inode.key = newKey inode.key = newKey
inode.value = value inode.value = value
inode.pgid = pgid inode.pgid = pgId
_assert(len(inode.key) > 0, "put: zero-length inode key") _assert(len(inode.key) > 0, "put: zero-length inode key")
} }
@@ -188,12 +188,16 @@ func (n *node) read(p *page) {
} }
// write writes the items onto one or more pages. // write writes the items onto one or more pages.
// The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set
// and the rest should be zeroed.
func (n *node) write(p *page) { func (n *node) write(p *page) {
_assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page")
// Initialize page. // Initialize page.
if n.isLeaf { if n.isLeaf {
p.flags |= leafPageFlag p.flags = leafPageFlag
} else { } else {
p.flags |= branchPageFlag p.flags = branchPageFlag
} }
if len(n.inodes) >= 0xFFFF { if len(n.inodes) >= 0xFFFF {
@@ -300,7 +304,7 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
n.inodes = n.inodes[:splitIndex] n.inodes = n.inodes[:splitIndex]
// Update the statistics. // Update the statistics.
n.bucket.tx.stats.Split++ n.bucket.tx.stats.IncSplit(1)
return n, next return n, next
} }
@@ -387,7 +391,7 @@ func (n *node) spill() error {
} }
// Update the statistics. // Update the statistics.
tx.stats.Spill++ tx.stats.IncSpill(1)
} }
// If the root node split and created a new root then we need to spill that // If the root node split and created a new root then we need to spill that
@@ -409,7 +413,7 @@ func (n *node) rebalance() {
n.unbalanced = false n.unbalanced = false
// Update statistics. // Update statistics.
n.bucket.tx.stats.Rebalance++ n.bucket.tx.stats.IncRebalance(1)
// Ignore if node is above threshold (25%) and has enough keys. // Ignore if node is above threshold (25%) and has enough keys.
var threshold = n.bucket.tx.db.pageSize / 4 var threshold = n.bucket.tx.db.pageSize / 4
@@ -543,7 +547,7 @@ func (n *node) dereference() {
} }
// Update statistics. // Update statistics.
n.bucket.tx.stats.NodeDeref++ n.bucket.tx.stats.IncNodeDeref(1)
} }
// free adds the node's underlying page to the freelist. // free adds the node's underlying page to the freelist.
@@ -581,6 +585,10 @@ func (n *node) dump() {
} }
*/ */
func compareKeys(left, right []byte) int {
return bytes.Compare(left, right)
}
type nodes []*node type nodes []*node
func (s nodes) Len() int { return len(s) } func (s nodes) Len() int { return len(s) }
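
Statistics are no longer bumped by direct field access (stats.Split++) but through Inc* accessors; together with the sync/atomic import added to tx.go, this is meant to make the counters safe to read while a transaction is in flight. A generic sketch of that accessor pattern, not bbolt's actual struct:

```go
package statsdemo

import "sync/atomic"

// counterStats shows the accessor style the diff moves to: callers never
// touch the field directly, so concurrent reads and increments do not race.
type counterStats struct {
	split int64
}

func (s *counterStats) IncSplit(n int64) { atomic.AddInt64(&s.split, n) }

func (s *counterStats) GetSplit() int64 { return atomic.LoadInt64(&s.split) }
```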

vendor/go.etcd.io/bbolt/page.go generated vendored

@@ -53,6 +53,16 @@ func (p *page) meta() *meta {
return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
} }
func (p *page) fastCheck(id pgid) {
_assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id)
// Only one flag of page-type can be set.
_assert(p.flags == branchPageFlag ||
p.flags == leafPageFlag ||
p.flags == metaPageFlag ||
p.flags == freelistPageFlag,
"page %v: has unexpected type/flags: %x", p.id, p.flags)
}
// leafPageElement retrieves the leaf node by index // leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement { func (p *page) leafPageElement(index uint16) *leafPageElement {
return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
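
The new fastCheck relies on bbolt's internal _assert helper, which panics with a formatted message when a page fails its sanity check. A minimal stand-in, for illustration only:

```go
package assertdemo

import "fmt"

// assertf panics with a formatted message when cond is false, matching the
// shape of the internal helper that fastCheck relies on.
func assertf(cond bool, format string, args ...interface{}) {
	if !cond {
		panic(fmt.Sprintf("assertion failed: "+format, args...))
	}
}
```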

vendor/go.etcd.io/bbolt/tx.go generated vendored

@@ -6,6 +6,7 @@ import (
"os" "os"
"sort" "sort"
"strings" "strings"
"sync/atomic"
"time" "time"
"unsafe" "unsafe"
) )
@@ -151,17 +152,19 @@ func (tx *Tx) Commit() error {
// Rebalance nodes which have had deletions. // Rebalance nodes which have had deletions.
var startTime = time.Now() var startTime = time.Now()
tx.root.rebalance() tx.root.rebalance()
if tx.stats.Rebalance > 0 { if tx.stats.GetRebalance() > 0 {
tx.stats.RebalanceTime += time.Since(startTime) tx.stats.IncRebalanceTime(time.Since(startTime))
} }
opgid := tx.meta.pgid
// spill data onto dirty pages. // spill data onto dirty pages.
startTime = time.Now() startTime = time.Now()
if err := tx.root.spill(); err != nil { if err := tx.root.spill(); err != nil {
tx.rollback() tx.rollback()
return err return err
} }
tx.stats.SpillTime += time.Since(startTime) tx.stats.IncSpillTime(time.Since(startTime))
// Free the old root bucket. // Free the old root bucket.
tx.meta.root.root = tx.root.root tx.meta.root.root = tx.root.root
@@ -180,6 +183,14 @@ func (tx *Tx) Commit() error {
tx.meta.freelist = pgidNoFreelist tx.meta.freelist = pgidNoFreelist
} }
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
}
}
// Write dirty pages to disk. // Write dirty pages to disk.
startTime = time.Now() startTime = time.Now()
if err := tx.write(); err != nil { if err := tx.write(); err != nil {
@@ -208,7 +219,7 @@ func (tx *Tx) Commit() error {
tx.rollback() tx.rollback()
return err return err
} }
tx.stats.WriteTime += time.Since(startTime) tx.stats.IncWriteTime(time.Since(startTime))
// Finalize the transaction. // Finalize the transaction.
tx.close() tx.close()
@@ -224,7 +235,6 @@ func (tx *Tx) Commit() error {
func (tx *Tx) commitFreelist() error { func (tx *Tx) commitFreelist() error {
// Allocate new pages for the new free list. This will overestimate // Allocate new pages for the new free list. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad). // the size of the freelist but not underestimate the size (which would be bad).
opgid := tx.meta.pgid
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil { if err != nil {
tx.rollback() tx.rollback()
@@ -235,13 +245,6 @@ func (tx *Tx) commitFreelist() error {
return err return err
} }
tx.meta.freelist = p.id tx.meta.freelist = p.id
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
}
}
return nil return nil
} }
@@ -275,13 +278,17 @@ func (tx *Tx) rollback() {
} }
if tx.writable { if tx.writable {
tx.db.freelist.rollback(tx.meta.txid) tx.db.freelist.rollback(tx.meta.txid)
if !tx.db.hasSyncedFreelist() { // When mmap fails, the `data`, `dataref` and `datasz` may be reset to
// Reconstruct free page list by scanning the DB to get the whole free page list. // zero values, and there is no way to reload free page IDs in this case.
// Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode. if tx.db.data != nil {
tx.db.freelist.noSyncReload(tx.db.freepages()) if !tx.db.hasSyncedFreelist() {
} else { // Reconstruct free page list by scanning the DB to get the whole free page list.
// Read free page list from freelist page. // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) tx.db.freelist.noSyncReload(tx.db.freepages())
} else {
// Read free page list from freelist page.
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
}
} }
} }
tx.close() tx.close()
@@ -400,98 +407,6 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
return f.Close() return f.Close()
} }
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction, however, it is not safe to execute other writer transactions at
// the same time.
func (tx *Tx) Check() <-chan error {
ch := make(chan error)
go tx.check(ch)
return ch
}
func (tx *Tx) check(ch chan error) {
// Force loading free list if opened in ReadOnly mode.
tx.db.loadFreelist()
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}
freed[id] = true
}
// Track every reachable page.
reachable := make(map[pgid]*page)
reachable[0] = tx.page(0) // meta0
reachable[1] = tx.page(1) // meta1
if tx.meta.freelist != pgidNoFreelist {
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
}
}
// Recursively check buckets.
tx.checkBucket(&tx.root, reachable, freed, ch)
// Ensure all pages below high water mark are either reachable or freed.
for i := pgid(0); i < tx.meta.pgid; i++ {
_, isReachable := reachable[i]
if !isReachable && !freed[i] {
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
}
}
// Close the channel to signal completion.
close(ch)
}
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
// Ignore inline buckets.
if b.root == 0 {
return
}
// Check every page used by this bucket.
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
if p.id > tx.meta.pgid {
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
}
// Ensure each page is only referenced once.
for i := pgid(0); i <= pgid(p.overflow); i++ {
var id = p.id + i
if _, ok := reachable[id]; ok {
ch <- fmt.Errorf("page %d: multiple references", int(id))
}
reachable[id] = p
}
// We should only encounter un-freed leaf and branch pages.
if freed[p.id] {
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
}
})
// Check each bucket within this bucket.
_ = b.ForEach(func(k, v []byte) error {
if child := b.Bucket(k); child != nil {
tx.checkBucket(child, reachable, freed, ch)
}
return nil
})
}
// allocate returns a contiguous block of memory starting at a given page. // allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) { func (tx *Tx) allocate(count int) (*page, error) {
p, err := tx.db.allocate(tx.meta.txid, count) p, err := tx.db.allocate(tx.meta.txid, count)
@@ -503,8 +418,8 @@ func (tx *Tx) allocate(count int) (*page, error) {
tx.pages[p.id] = p tx.pages[p.id] = p
// Update statistics. // Update statistics.
tx.stats.PageCount += count tx.stats.IncPageCount(int64(count))
tx.stats.PageAlloc += count * tx.db.pageSize tx.stats.IncPageAlloc(int64(count * tx.db.pageSize))
return p, nil return p, nil
} }
@@ -539,7 +454,7 @@ func (tx *Tx) write() error {
} }
// Update statistics. // Update statistics.
tx.stats.Write++ tx.stats.IncWrite(1)
// Exit inner for loop if we've written all the chunks. // Exit inner for loop if we've written all the chunks.
rem -= sz rem -= sz
@@ -574,7 +489,7 @@ func (tx *Tx) write() error {
for i := range buf { for i := range buf {
buf[i] = 0 buf[i] = 0
} }
tx.db.pagePool.Put(buf) tx.db.pagePool.Put(buf) //nolint:staticcheck
} }
return nil return nil
@@ -598,7 +513,7 @@ func (tx *Tx) writeMeta() error {
} }
// Update statistics. // Update statistics.
tx.stats.Write++ tx.stats.IncWrite(1)
return nil return nil
} }
@@ -609,26 +524,35 @@ func (tx *Tx) page(id pgid) *page {
// Check the dirty pages first. // Check the dirty pages first.
if tx.pages != nil { if tx.pages != nil {
if p, ok := tx.pages[id]; ok { if p, ok := tx.pages[id]; ok {
p.fastCheck(id)
return p return p
} }
} }
// Otherwise return directly from the mmap. // Otherwise return directly from the mmap.
return tx.db.page(id) p := tx.db.page(id)
p.fastCheck(id)
return p
} }
// forEachPage iterates over every page within a given page and executes a function. // forEachPage iterates over every page within a given page and executes a function.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) {
p := tx.page(pgid) stack := make([]pgid, 10)
stack[0] = pgidnum
tx.forEachPageInternal(stack[:1], fn)
}
func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) {
p := tx.page(pgidstack[len(pgidstack)-1])
// Execute function. // Execute function.
fn(p, depth) fn(p, len(pgidstack)-1, pgidstack)
// Recursively loop over children. // Recursively loop over children.
if (p.flags & branchPageFlag) != 0 { if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ { for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i)) elem := p.branchPageElement(uint16(i))
tx.forEachPage(elem.pgid, depth+1, fn) tx.forEachPageInternal(append(pgidstack, elem.pgid), fn)
} }
} }
} }
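The forEachPage callback now receives the full stack of page IDs from the root instead of only a depth, so callers can report the exact path that reached a page. A minimal sketch of the new callback shape, assuming package-internal access to Tx, page, and pgid (the dumpSubtree helper is hypothetical):

func dumpSubtree(tx *Tx, rootID pgid) {
	tx.forEachPage(rootID, func(p *page, depth int, stack []pgid) {
		// depth equals len(stack)-1; stack[0] is rootID and stack[len(stack)-1] is the current page.
		fmt.Printf("page %d at depth %d via %v\n", p.id, depth, stack)
	})
}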
@@ -642,6 +566,10 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
return nil, nil return nil, nil
} }
if tx.db.freelist == nil {
return nil, ErrFreePagesNotLoaded
}
// Build the page info. // Build the page info.
p := tx.db.page(pgid(id)) p := tx.db.page(pgid(id))
info := &PageInfo{ info := &PageInfo{
@@ -663,43 +591,61 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
// TxStats represents statistics about the actions performed by the transaction. // TxStats represents statistics about the actions performed by the transaction.
type TxStats struct { type TxStats struct {
// Page statistics. // Page statistics.
PageCount int // number of page allocations //
PageAlloc int // total bytes allocated // DEPRECATED: Use GetPageCount() or IncPageCount()
PageCount int64 // number of page allocations
// DEPRECATED: Use GetPageAlloc() or IncPageAlloc()
PageAlloc int64 // total bytes allocated
// Cursor statistics. // Cursor statistics.
CursorCount int // number of cursors created //
// DEPRECATED: Use GetCursorCount() or IncCursorCount()
CursorCount int64 // number of cursors created
// Node statistics // Node statistics
NodeCount int // number of node allocations //
NodeDeref int // number of node dereferences // DEPRECATED: Use GetNodeCount() or IncNodeCount()
NodeCount int64 // number of node allocations
// DEPRECATED: Use GetNodeDeref() or IncNodeDeref()
NodeDeref int64 // number of node dereferences
// Rebalance statistics. // Rebalance statistics.
Rebalance int // number of node rebalances //
// DEPRECATED: Use GetRebalance() or IncRebalance()
Rebalance int64 // number of node rebalances
// DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime()
RebalanceTime time.Duration // total time spent rebalancing RebalanceTime time.Duration // total time spent rebalancing
// Split/Spill statistics. // Split/Spill statistics.
Split int // number of nodes split //
Spill int // number of nodes spilled // DEPRECATED: Use GetSplit() or IncSplit()
Split int64 // number of nodes split
// DEPRECATED: Use GetSpill() or IncSpill()
Spill int64 // number of nodes spilled
// DEPRECATED: Use GetSpillTime() or IncSpillTime()
SpillTime time.Duration // total time spent spilling SpillTime time.Duration // total time spent spilling
// Write statistics. // Write statistics.
Write int // number of writes performed //
// DEPRECATED: Use GetWrite() or IncWrite()
Write int64 // number of writes performed
// DEPRECATED: Use GetWriteTime() or IncWriteTime()
WriteTime time.Duration // total time spent writing to disk WriteTime time.Duration // total time spent writing to disk
} }
func (s *TxStats) add(other *TxStats) { func (s *TxStats) add(other *TxStats) {
s.PageCount += other.PageCount s.IncPageCount(other.GetPageCount())
s.PageAlloc += other.PageAlloc s.IncPageAlloc(other.GetPageAlloc())
s.CursorCount += other.CursorCount s.IncCursorCount(other.GetCursorCount())
s.NodeCount += other.NodeCount s.IncNodeCount(other.GetNodeCount())
s.NodeDeref += other.NodeDeref s.IncNodeDeref(other.GetNodeDeref())
s.Rebalance += other.Rebalance s.IncRebalance(other.GetRebalance())
s.RebalanceTime += other.RebalanceTime s.IncRebalanceTime(other.GetRebalanceTime())
s.Split += other.Split s.IncSplit(other.GetSplit())
s.Spill += other.Spill s.IncSpill(other.GetSpill())
s.SpillTime += other.SpillTime s.IncSpillTime(other.GetSpillTime())
s.Write += other.Write s.IncWrite(other.GetWrite())
s.WriteTime += other.WriteTime s.IncWriteTime(other.GetWriteTime())
} }
// Sub calculates and returns the difference between two sets of transaction stats. // Sub calculates and returns the difference between two sets of transaction stats.
@@ -707,17 +653,145 @@ func (s *TxStats) add(other *TxStats) {
// you need the performance counters that occurred within that time span. // you need the performance counters that occurred within that time span.
func (s *TxStats) Sub(other *TxStats) TxStats { func (s *TxStats) Sub(other *TxStats) TxStats {
var diff TxStats var diff TxStats
diff.PageCount = s.PageCount - other.PageCount diff.PageCount = s.GetPageCount() - other.GetPageCount()
diff.PageAlloc = s.PageAlloc - other.PageAlloc diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc()
diff.CursorCount = s.CursorCount - other.CursorCount diff.CursorCount = s.GetCursorCount() - other.GetCursorCount()
diff.NodeCount = s.NodeCount - other.NodeCount diff.NodeCount = s.GetNodeCount() - other.GetNodeCount()
diff.NodeDeref = s.NodeDeref - other.NodeDeref diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref()
diff.Rebalance = s.Rebalance - other.Rebalance diff.Rebalance = s.GetRebalance() - other.GetRebalance()
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime()
diff.Split = s.Split - other.Split diff.Split = s.GetSplit() - other.GetSplit()
diff.Spill = s.Spill - other.Spill diff.Spill = s.GetSpill() - other.GetSpill()
diff.SpillTime = s.SpillTime - other.SpillTime diff.SpillTime = s.GetSpillTime() - other.GetSpillTime()
diff.Write = s.Write - other.Write diff.Write = s.GetWrite() - other.GetWrite()
diff.WriteTime = s.WriteTime - other.WriteTime diff.WriteTime = s.GetWriteTime() - other.GetWriteTime()
return diff return diff
} }
// GetPageCount returns PageCount atomically.
func (s *TxStats) GetPageCount() int64 {
return atomic.LoadInt64(&s.PageCount)
}
// IncPageCount increases PageCount atomically and returns the new value.
func (s *TxStats) IncPageCount(delta int64) int64 {
return atomic.AddInt64(&s.PageCount, delta)
}
// GetPageAlloc returns PageAlloc atomically.
func (s *TxStats) GetPageAlloc() int64 {
return atomic.LoadInt64(&s.PageAlloc)
}
// IncPageAlloc increases PageAlloc atomically and returns the new value.
func (s *TxStats) IncPageAlloc(delta int64) int64 {
return atomic.AddInt64(&s.PageAlloc, delta)
}
// GetCursorCount returns CursorCount atomically.
func (s *TxStats) GetCursorCount() int64 {
return atomic.LoadInt64(&s.CursorCount)
}
// IncCursorCount increases CursorCount atomically and returns the new value.
func (s *TxStats) IncCursorCount(delta int64) int64 {
return atomic.AddInt64(&s.CursorCount, delta)
}
// GetNodeCount returns NodeCount atomically.
func (s *TxStats) GetNodeCount() int64 {
return atomic.LoadInt64(&s.NodeCount)
}
// IncNodeCount increases NodeCount atomically and returns the new value.
func (s *TxStats) IncNodeCount(delta int64) int64 {
return atomic.AddInt64(&s.NodeCount, delta)
}
// GetNodeDeref returns NodeDeref atomically.
func (s *TxStats) GetNodeDeref() int64 {
return atomic.LoadInt64(&s.NodeDeref)
}
// IncNodeDeref increases NodeDeref atomically and returns the new value.
func (s *TxStats) IncNodeDeref(delta int64) int64 {
return atomic.AddInt64(&s.NodeDeref, delta)
}
// GetRebalance returns Rebalance atomically.
func (s *TxStats) GetRebalance() int64 {
return atomic.LoadInt64(&s.Rebalance)
}
// IncRebalance increases Rebalance atomically and returns the new value.
func (s *TxStats) IncRebalance(delta int64) int64 {
return atomic.AddInt64(&s.Rebalance, delta)
}
// GetRebalanceTime returns RebalanceTime atomically.
func (s *TxStats) GetRebalanceTime() time.Duration {
return atomicLoadDuration(&s.RebalanceTime)
}
// IncRebalanceTime increases RebalanceTime atomically and returns the new value.
func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration {
return atomicAddDuration(&s.RebalanceTime, delta)
}
// GetSplit returns Split atomically.
func (s *TxStats) GetSplit() int64 {
return atomic.LoadInt64(&s.Split)
}
// IncSplit increases Split atomically and returns the new value.
func (s *TxStats) IncSplit(delta int64) int64 {
return atomic.AddInt64(&s.Split, delta)
}
// GetSpill returns Spill atomically.
func (s *TxStats) GetSpill() int64 {
return atomic.LoadInt64(&s.Spill)
}
// IncSpill increases Spill atomically and returns the new value.
func (s *TxStats) IncSpill(delta int64) int64 {
return atomic.AddInt64(&s.Spill, delta)
}
// GetSpillTime returns SpillTime atomically.
func (s *TxStats) GetSpillTime() time.Duration {
return atomicLoadDuration(&s.SpillTime)
}
// IncSpillTime increases SpillTime atomically and returns the new value.
func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration {
return atomicAddDuration(&s.SpillTime, delta)
}
// GetWrite returns Write atomically.
func (s *TxStats) GetWrite() int64 {
return atomic.LoadInt64(&s.Write)
}
// IncWrite increases Write atomically and returns the new value.
func (s *TxStats) IncWrite(delta int64) int64 {
return atomic.AddInt64(&s.Write, delta)
}
// GetWriteTime returns WriteTime atomically.
func (s *TxStats) GetWriteTime() time.Duration {
return atomicLoadDuration(&s.WriteTime)
}
// IncWriteTime increases WriteTime atomically and returns the new value.
func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration {
return atomicAddDuration(&s.WriteTime, delta)
}
func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration {
return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du)))
}
func atomicLoadDuration(ptr *time.Duration) time.Duration {
return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr))))
}
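The TxStats counters are now int64 values meant to be read and updated through the atomic Get*/Inc* helpers; the exported fields are kept only for backward compatibility. A small usage sketch, assuming a database opened with go.etcd.io/bbolt (the file name and bucket name are placeholders):

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("stats.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("demo"))
		return err
	}); err != nil {
		log.Fatal(err)
	}

	// Read transaction statistics through the atomic getters rather than the
	// deprecated exported fields (Write, SpillTime, ...).
	ts := db.Stats().TxStats
	fmt.Println("writes:", ts.GetWrite(), "spill time:", ts.GetSpillTime())
}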

226
vendor/go.etcd.io/bbolt/tx_check.go generated vendored Normal file
View File

@@ -0,0 +1,226 @@
package bbolt
import (
"encoding/hex"
"fmt"
)
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction, however, it is not safe to execute other writer transactions at
// the same time.
func (tx *Tx) Check() <-chan error {
return tx.CheckWithOptions()
}
// CheckWithOptions allows users to provide a customized `KVStringer` implementation,
// so that bolt can generate human-readable diagnostic messages.
func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error {
chkConfig := checkConfig{
kvStringer: HexKVStringer(),
}
for _, op := range options {
op(&chkConfig)
}
ch := make(chan error)
go tx.check(chkConfig.kvStringer, ch)
return ch
}
func (tx *Tx) check(kvStringer KVStringer, ch chan error) {
// Force loading free list if opened in ReadOnly mode.
tx.db.loadFreelist()
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}
freed[id] = true
}
// Track every reachable page.
reachable := make(map[pgid]*page)
reachable[0] = tx.page(0) // meta0
reachable[1] = tx.page(1) // meta1
if tx.meta.freelist != pgidNoFreelist {
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
}
}
// Recursively check buckets.
tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch)
// Ensure all pages below high water mark are either reachable or freed.
for i := pgid(0); i < tx.meta.pgid; i++ {
_, isReachable := reachable[i]
if !isReachable && !freed[i] {
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
}
}
// Close the channel to signal completion.
close(ch)
}
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool,
kvStringer KVStringer, ch chan error) {
// Ignore inline buckets.
if b.root == 0 {
return
}
// Check every page used by this bucket.
b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) {
if p.id > tx.meta.pgid {
ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack)
}
// Ensure each page is only referenced once.
for i := pgid(0); i <= pgid(p.overflow); i++ {
var id = p.id + i
if _, ok := reachable[id]; ok {
ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack)
}
reachable[id] = p
}
// We should only encounter un-freed leaf and branch pages.
if freed[p.id] {
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack)
}
})
tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch)
// Check each bucket within this bucket.
_ = b.ForEachBucket(func(k []byte) error {
if child := b.Bucket(k); child != nil {
tx.checkBucket(child, reachable, freed, kvStringer, ch)
}
return nil
})
}
// recursivelyCheckPages confirms database consistency with respect to b-tree
// key order constraints:
// - keys on pages must be sorted
// - keys on children pages are between 2 consecutive keys on the parent's branch page.
func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) {
tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch)
}
// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are:
// - >=`minKeyClosed` (can be nil)
// - <`maxKeyOpen` (can be nil)
// - Are in the right ordering relationship to their parents.
// `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message.
func (tx *Tx) recursivelyCheckPagesInternal(
pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid,
keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) {
p := tx.page(pgId)
pagesStack = append(pagesStack, pgId)
switch {
case p.flags&branchPageFlag != 0:
// For branch page we navigate ranges of all subpages.
runningMin := minKeyClosed
for i := range p.branchPageElements() {
elem := p.branchPageElement(uint16(i))
verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
maxKey := maxKeyOpen
if i < len(p.branchPageElements())-1 {
maxKey = p.branchPageElement(uint16(i + 1)).key()
}
maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch)
runningMin = maxKeyInSubtree
}
return maxKeyInSubtree
case p.flags&leafPageFlag != 0:
runningMin := minKeyClosed
for i := range p.leafPageElements() {
elem := p.leafPageElement(uint16(i))
verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
runningMin = elem.key()
}
if p.count > 0 {
return p.leafPageElement(p.count - 1).key()
}
default:
ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId)
}
return maxKeyInSubtree
}
/***
* verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key",
* is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch).
*/
func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) {
if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 {
ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v",
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
}
if index > 0 {
cmpRet := compareKeys(previousKey, key)
if cmpRet > 0 {
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v",
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
}
if cmpRet == 0 {
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v",
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
}
}
if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 {
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v",
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
}
}
// ===========================================================================================
type checkConfig struct {
kvStringer KVStringer
}
type CheckOption func(options *checkConfig)
func WithKVStringer(kvStringer KVStringer) CheckOption {
return func(c *checkConfig) {
c.kvStringer = kvStringer
}
}
// KVStringer allows preparing human-readable diagnostic messages.
type KVStringer interface {
KeyToString([]byte) string
ValueToString([]byte) string
}
// HexKVStringer serializes both key & value to hex representation.
func HexKVStringer() KVStringer {
return hexKvStringer{}
}
type hexKvStringer struct{}
func (_ hexKvStringer) KeyToString(key []byte) string {
return hex.EncodeToString(key)
}
func (_ hexKvStringer) ValueToString(value []byte) string {
return hex.EncodeToString(value)
}
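CheckWithOptions lets callers plug in their own KVStringer so that consistency errors render keys in a readable form instead of hex. A hedged usage sketch; db is an already opened *bolt.DB and plainKVStringer is a hypothetical implementation:

func checkDB(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		// The returned channel is closed once the check completes.
		for err := range tx.CheckWithOptions(bolt.WithKVStringer(plainKVStringer{})) {
			log.Printf("consistency error: %v", err)
		}
		return nil
	})
}

// plainKVStringer renders keys and values verbatim instead of hex-encoding them.
type plainKVStringer struct{}

func (plainKVStringer) KeyToString(k []byte) string   { return string(k) }
func (plainKVStringer) ValueToString(v []byte) string { return string(v) }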

View File

@@ -26,7 +26,7 @@ import (
var ( var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with. // MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0" MinClusterVersion = "3.0.0"
Version = "3.5.7" Version = "3.5.8"
APIVersion = "unknown" APIVersion = "unknown"
// Git SHA Value will be set during build // Git SHA Value will be set during build

View File

@@ -16,6 +16,7 @@ package logutil
import ( import (
"sort" "sort"
"time"
"go.uber.org/zap" "go.uber.org/zap"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
@@ -46,15 +47,20 @@ var DefaultZapLoggerConfig = zap.Config{
// copied from "zap.NewProductionEncoderConfig" with some updates // copied from "zap.NewProductionEncoderConfig" with some updates
EncoderConfig: zapcore.EncoderConfig{ EncoderConfig: zapcore.EncoderConfig{
TimeKey: "ts", TimeKey: "ts",
LevelKey: "level", LevelKey: "level",
NameKey: "logger", NameKey: "logger",
CallerKey: "caller", CallerKey: "caller",
MessageKey: "msg", MessageKey: "msg",
StacktraceKey: "stacktrace", StacktraceKey: "stacktrace",
LineEnding: zapcore.DefaultLineEnding, LineEnding: zapcore.DefaultLineEnding,
EncodeLevel: zapcore.LowercaseLevelEncoder, EncodeLevel: zapcore.LowercaseLevelEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
// Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps
EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
enc.AppendString(t.Format("2006-01-02T15:04:05.999999Z0700"))
},
EncodeDuration: zapcore.StringDurationEncoder, EncodeDuration: zapcore.StringDurationEncoder,
EncodeCaller: zapcore.ShortCallerEncoder, EncodeCaller: zapcore.ShortCallerEncoder,
}, },
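The default zap encoder now uses a custom EncodeTime so that timestamps keep the format and microsecond precision of the historic capnslog output. A tiny illustration of the layout string taken from the diff (the sample instant is arbitrary):

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2023, 5, 6, 6, 19, 17, 123456789, time.UTC)
	// Microsecond precision, "Z" when the offset is zero.
	fmt.Println(t.Format("2006-01-02T15:04:05.999999Z0700")) // 2023-05-06T06:19:17.123456Z
}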

View File

@@ -0,0 +1,47 @@
// Copyright 2023 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tlsutil
import (
"crypto/tls"
"fmt"
)
type TLSVersion string
// Constants for TLS versions.
const (
TLSVersionDefault TLSVersion = ""
TLSVersion12 TLSVersion = "TLS1.2"
TLSVersion13 TLSVersion = "TLS1.3"
)
// GetTLSVersion returns the corresponding tls.Version or error.
func GetTLSVersion(version string) (uint16, error) {
var v uint16
switch version {
case string(TLSVersionDefault):
v = 0 // 0 means let Go decide.
case string(TLSVersion12):
v = tls.VersionTLS12
case string(TLSVersion13):
v = tls.VersionTLS13
default:
return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version)
}
return v, nil
}
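GetTLSVersion maps the string forms accepted in configuration to crypto/tls constants, with the empty string meaning "let Go decide". A hedged sketch, assuming the import path go.etcd.io/etcd/client/pkg/v3/tlsutil:

import (
	"crypto/tls"
	"log"

	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
)

func minTLSConfig(version string) *tls.Config {
	// "" maps to 0 (Go's default); unrecognized strings return an error.
	v, err := tlsutil.GetTLSVersion(version)
	if err != nil {
		log.Fatalf("invalid TLS version %q: %v", version, err)
	}
	return &tls.Config{MinVersion: v}
}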

View File

@@ -165,6 +165,14 @@ type TLSInfo struct {
// Note that cipher suites are prioritized in the given order. // Note that cipher suites are prioritized in the given order.
CipherSuites []uint16 CipherSuites []uint16
// MinVersion is the minimum TLS version that is acceptable.
// If not set, the minimum version is TLS 1.2.
MinVersion uint16
// MaxVersion is the maximum TLS version that is acceptable.
// If not set, the default used by Go is selected (see tls.Config.MaxVersion).
MaxVersion uint16
selfCert bool selfCert bool
// parseFunc exists to simplify testing. Typically, parseFunc // parseFunc exists to simplify testing. Typically, parseFunc
@@ -339,8 +347,8 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
// Previously, // Previously,
// 1. Server has non-empty (*tls.Config).Certificates on client hello // 1. Server has non-empty (*tls.Config).Certificates on client hello
// 2. Server calls (*tls.Config).GetCertificate iff: // 2. Server calls (*tls.Config).GetCertificate iff:
// - Server's (*tls.Config).Certificates is not empty, or // - Server's (*tls.Config).Certificates is not empty, or
// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName // - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
// //
// When (*tls.Config).Certificates is always populated on initial handshake, // When (*tls.Config).Certificates is always populated on initial handshake,
// client is expected to provide a valid matching SNI to pass the TLS // client is expected to provide a valid matching SNI to pass the TLS
@@ -378,8 +386,17 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
} }
} }
var minVersion uint16
if info.MinVersion != 0 {
minVersion = info.MinVersion
} else {
// Default minimum version is TLS 1.2, previous versions are insecure and deprecated.
minVersion = tls.VersionTLS12
}
cfg := &tls.Config{ cfg := &tls.Config{
MinVersion: tls.VersionTLS12, MinVersion: minVersion,
MaxVersion: info.MaxVersion,
ServerName: info.ServerName, ServerName: info.ServerName,
} }
@@ -510,11 +527,6 @@ func (info TLSInfo) ServerConfig() (*tls.Config, error) {
// "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
cfg.NextProtos = []string{"h2"} cfg.NextProtos = []string{"h2"}
// go1.13 enables TLS 1.3 by default
// and in TLS 1.3, cipher suites are not configurable
// setting Max TLS version to TLS 1.2 for go 1.13
cfg.MaxVersion = tls.VersionTLS12
return cfg, nil return cfg, nil
} }
@@ -569,11 +581,6 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) {
} }
} }
// go1.13 enables TLS 1.3 by default
// and in TLS 1.3, cipher suites are not configurable
// setting Max TLS version to TLS 1.2 for go 1.13
cfg.MaxVersion = tls.VersionTLS12
return cfg, nil return cfg, nil
} }
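TLSInfo now exposes MinVersion and MaxVersion, and ServerConfig/ClientConfig no longer pin MaxVersion to TLS 1.2, so TLS 1.3 can be negotiated end to end. A minimal sketch, assuming the transport package import path go.etcd.io/etcd/client/pkg/v3/transport and placeholder certificate paths:

import (
	"crypto/tls"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func serverTLS() (*tls.Config, error) {
	info := transport.TLSInfo{
		CertFile:      "/etc/etcd/server.crt", // placeholder paths
		KeyFile:       "/etc/etcd/server.key",
		TrustedCAFile: "/etc/etcd/ca.crt",
		MinVersion:    tls.VersionTLS12, // the default when left zero
		MaxVersion:    tls.VersionTLS13, // zero leaves the ceiling to Go
	}
	return info.ServerConfig()
}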

View File

@@ -68,6 +68,5 @@ Use a custom context to set timeouts on your operations:
// handle error // handle error
} }
} }
*/ */
package client package client

View File

@@ -61,7 +61,8 @@
// //
// 1. context error: canceled or deadline exceeded. // 1. context error: canceled or deadline exceeded.
// 2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded. // 2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded.
// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go //
// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
// //
// Here is the example code to handle client errors: // Here is the example code to handle client errors:
// //
@@ -102,5 +103,4 @@
// The grpc load balancer is registered statically and is shared across etcd clients. // The grpc load balancer is registered statically and is shared across etcd clients.
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment // To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
// variable. E.g. "ETCD_CLIENT_DEBUG=1". // variable. E.g. "ETCD_CLIENT_DEBUG=1".
//
package clientv3 package clientv3

View File

@@ -45,8 +45,8 @@ func extractHostFromPath(pathStr string) string {
return extractHostFromHostPort(path.Base(pathStr)) return extractHostFromHostPort(path.Base(pathStr))
} }
//mustSplit2 returns the values from strings.SplitN(s, sep, 2). // mustSplit2 returns the values from strings.SplitN(s, sep, 2).
//If sep is not found, it returns ("", "", false) instead. // If sep is not found, it returns ("", "", false) instead.
func mustSplit2(s, sep string) (string, string) { func mustSplit2(s, sep string) (string, string) {
spl := strings.SplitN(s, sep, 2) spl := strings.SplitN(s, sep, 2)
if len(spl) < 2 { if len(spl) < 2 {
@@ -81,11 +81,12 @@ func schemeToCredsRequirement(schema string) CredsRequirement {
// The main differences: // The main differences:
// - etcd supports unixs & https names as opposed to unix & http to // - etcd supports unixs & https names as opposed to unix & http to
// distinguish need to configure certificates. // distinguish need to configure certificates.
// - etcd supports http(s) names as opposed to tcp supported by grpc/dial method. // - etcd supports http(s) names as opposed to tcp supported by grpc/dial method.
// - etcd supports unix(s)://local-file naming schema // - etcd supports unix(s)://local-file naming schema
// (as opposed to unix:local-file canonical name used by grpc for current dir files). // (as opposed to unix:local-file canonical name used by grpc for current dir files).
// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon) // - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
// is considered serverName - to allow local testing of cert-protected communication. // is considered serverName - to allow local testing of cert-protected communication.
//
// See more: // See more:
// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47 // - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47
// - https://golang.org/pkg/net/#Dial // - https://golang.org/pkg/net/#Dial

View File

@@ -25,15 +25,14 @@ import (
// Txn is the interface that wraps mini-transactions. // Txn is the interface that wraps mini-transactions.
// //
// Txn(context.TODO()).If( // Txn(context.TODO()).If(
// Compare(Value(k1), ">", v1), // Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2) // Compare(Version(k1), "=", 2)
// ).Then( // ).Then(
// OpPut(k2,v2), OpPut(k3,v3) // OpPut(k2,v2), OpPut(k3,v3)
// ).Else( // ).Else(
// OpPut(k4,v4), OpPut(k5,v5) // OpPut(k4,v4), OpPut(k5,v5)
// ).Commit() // ).Commit()
//
type Txn interface { type Txn interface {
// If takes a list of comparison. If all comparisons passed in succeed, // If takes a list of comparison. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations // the operations passed into Then() will be executed. Or the operations

View File

@@ -848,7 +848,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
} }
} else { } else {
// current progress of watch; <= store revision // current progress of watch; <= store revision
nextRev = wr.Header.Revision nextRev = wr.Header.Revision + 1
} }
if len(wr.Events) > 0 { if len(wr.Events) > 0 {

View File

@@ -241,34 +241,34 @@ type intervalTree struct {
// //
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324 // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324
// //
// 0. RB-DELETE(T, z) // RB-DELETE(T, z)
// 1. //
// 2. y = z // y = z
// 3. y-original-color = y.color // y-original-color = y.color
// 4. //
// 5. if z.left == T.nil // if z.left == T.nil
// 6. x = z.right // x = z.right
// 7. RB-TRANSPLANT(T, z, z.right) // RB-TRANSPLANT(T, z, z.right)
// 8. else if z.right == T.nil // else if z.right == T.nil
// 9. x = z.left // x = z.left
// 10. RB-TRANSPLANT(T, z, z.left) // RB-TRANSPLANT(T, z, z.left)
// 11. else // else
// 12. y = TREE-MINIMUM(z.right) // y = TREE-MINIMUM(z.right)
// 13. y-original-color = y.color // y-original-color = y.color
// 14. x = y.right // x = y.right
// 15. if y.p == z // if y.p == z
// 16. x.p = y // x.p = y
// 17. else // else
// 18. RB-TRANSPLANT(T, y, y.right) // RB-TRANSPLANT(T, y, y.right)
// 19. y.right = z.right // y.right = z.right
// 20. y.right.p = y // y.right.p = y
// 21. RB-TRANSPLANT(T, z, y) // RB-TRANSPLANT(T, z, y)
// 22. y.left = z.left // y.left = z.left
// 23. y.left.p = y // y.left.p = y
// 24. y.color = z.color // y.color = z.color
// 25. //
// 26. if y-original-color == BLACK // if y-original-color == BLACK
// 27. RB-DELETE-FIXUP(T, x) // RB-DELETE-FIXUP(T, x)
// Delete removes the node with the given interval from the tree, returning // Delete removes the node with the given interval from the tree, returning
// true if a node is in fact removed. // true if a node is in fact removed.
@@ -317,48 +317,47 @@ func (ivt *intervalTree) Delete(ivl Interval) bool {
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326 // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326
// //
// 0. RB-DELETE-FIXUP(T, z) // RB-DELETE-FIXUP(T, z)
// 1.
// 2. while x ≠ T.root and x.color == BLACK
// 3. if x == x.p.left
// 4. w = x.p.right
// 5. if w.color == RED
// 6. w.color = BLACK
// 7. x.p.color = RED
// 8. LEFT-ROTATE(T, x, p)
// 9. if w.left.color == BLACK and w.right.color == BLACK
// 10. w.color = RED
// 11. x = x.p
// 12. else if w.right.color == BLACK
// 13. w.left.color = BLACK
// 14. w.color = RED
// 15. RIGHT-ROTATE(T, w)
// 16. w = w.p.right
// 17. w.color = x.p.color
// 18. x.p.color = BLACK
// 19. LEFT-ROTATE(T, w.p)
// 20. x = T.root
// 21. else
// 22. w = x.p.left
// 23. if w.color == RED
// 24. w.color = BLACK
// 25. x.p.color = RED
// 26. RIGHT-ROTATE(T, x, p)
// 27. if w.right.color == BLACK and w.left.color == BLACK
// 28. w.color = RED
// 29. x = x.p
// 30. else if w.left.color == BLACK
// 31. w.right.color = BLACK
// 32. w.color = RED
// 33. LEFT-ROTATE(T, w)
// 34. w = w.p.left
// 35. w.color = x.p.color
// 36. x.p.color = BLACK
// 37. RIGHT-ROTATE(T, w.p)
// 38. x = T.root
// 39.
// 40. x.color = BLACK
// //
// while x ≠ T.root and x.color == BLACK
// if x == x.p.left
// w = x.p.right
// if w.color == RED
// w.color = BLACK
// x.p.color = RED
// LEFT-ROTATE(T, x, p)
// if w.left.color == BLACK and w.right.color == BLACK
// w.color = RED
// x = x.p
// else if w.right.color == BLACK
// w.left.color = BLACK
// w.color = RED
// RIGHT-ROTATE(T, w)
// w = w.p.right
// w.color = x.p.color
// x.p.color = BLACK
// LEFT-ROTATE(T, w.p)
// x = T.root
// else
// w = x.p.left
// if w.color == RED
// w.color = BLACK
// x.p.color = RED
// RIGHT-ROTATE(T, x, p)
// if w.right.color == BLACK and w.left.color == BLACK
// w.color = RED
// x = x.p
// else if w.left.color == BLACK
// w.right.color = BLACK
// w.color = RED
// LEFT-ROTATE(T, w)
// w = w.p.left
// w.color = x.p.color
// x.p.color = BLACK
// RIGHT-ROTATE(T, w.p)
// x = T.root
//
// x.color = BLACK
func (ivt *intervalTree) deleteFixup(x *intervalNode) { func (ivt *intervalTree) deleteFixup(x *intervalNode) {
for x != ivt.root && x.color(ivt.sentinel) == black { for x != ivt.root && x.color(ivt.sentinel) == black {
if x == x.parent.left { // line 3-20 if x == x.parent.left { // line 3-20
@@ -439,32 +438,32 @@ func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *inte
// //
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315 // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315
// //
// 0. RB-INSERT(T, z) // RB-INSERT(T, z)
// 1. //
// 2. y = T.nil // y = T.nil
// 3. x = T.root // x = T.root
// 4. //
// 5. while x ≠ T.nil // while x ≠ T.nil
// 6. y = x // y = x
// 7. if z.key < x.key // if z.key < x.key
// 8. x = x.left // x = x.left
// 9. else // else
// 10. x = x.right // x = x.right
// 11. //
// 12. z.p = y // z.p = y
// 13. //
// 14. if y == T.nil // if y == T.nil
// 15. T.root = z // T.root = z
// 16. else if z.key < y.key // else if z.key < y.key
// 17. y.left = z // y.left = z
// 18. else // else
// 19. y.right = z // y.right = z
// 20. //
// 21. z.left = T.nil // z.left = T.nil
// 22. z.right = T.nil // z.right = T.nil
// 23. z.color = RED // z.color = RED
// 24. //
// 25. RB-INSERT-FIXUP(T, z) // RB-INSERT-FIXUP(T, z)
// Insert adds a node with the given interval into the tree. // Insert adds a node with the given interval into the tree.
func (ivt *intervalTree) Insert(ivl Interval, val interface{}) { func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
@@ -499,38 +498,37 @@ func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316 // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316
// //
// 0. RB-INSERT-FIXUP(T, z) // RB-INSERT-FIXUP(T, z)
// 1.
// 2. while z.p.color == RED
// 3. if z.p == z.p.p.left
// 4. y = z.p.p.right
// 5. if y.color == RED
// 6. z.p.color = BLACK
// 7. y.color = BLACK
// 8. z.p.p.color = RED
// 9. z = z.p.p
// 10. else if z == z.p.right
// 11. z = z.p
// 12. LEFT-ROTATE(T, z)
// 13. z.p.color = BLACK
// 14. z.p.p.color = RED
// 15. RIGHT-ROTATE(T, z.p.p)
// 16. else
// 17. y = z.p.p.left
// 18. if y.color == RED
// 19. z.p.color = BLACK
// 20. y.color = BLACK
// 21. z.p.p.color = RED
// 22. z = z.p.p
// 23. else if z == z.p.right
// 24. z = z.p
// 25. RIGHT-ROTATE(T, z)
// 26. z.p.color = BLACK
// 27. z.p.p.color = RED
// 28. LEFT-ROTATE(T, z.p.p)
// 29.
// 30. T.root.color = BLACK
// //
// while z.p.color == RED
// if z.p == z.p.p.left
// y = z.p.p.right
// if y.color == RED
// z.p.color = BLACK
// y.color = BLACK
// z.p.p.color = RED
// z = z.p.p
// else if z == z.p.right
// z = z.p
// LEFT-ROTATE(T, z)
// z.p.color = BLACK
// z.p.p.color = RED
// RIGHT-ROTATE(T, z.p.p)
// else
// y = z.p.p.left
// if y.color == RED
// z.p.color = BLACK
// y.color = BLACK
// z.p.p.color = RED
// z = z.p.p
// else if z == z.p.right
// z = z.p
// RIGHT-ROTATE(T, z)
// z.p.color = BLACK
// z.p.p.color = RED
// LEFT-ROTATE(T, z.p.p)
//
// T.root.color = BLACK
func (ivt *intervalTree) insertFixup(z *intervalNode) { func (ivt *intervalTree) insertFixup(z *intervalNode) {
for z.parent.color(ivt.sentinel) == red { for z.parent.color(ivt.sentinel) == red {
if z.parent == z.parent.parent.left { // line 3-15 if z.parent == z.parent.parent.left { // line 3-15
@@ -578,26 +576,25 @@ func (ivt *intervalTree) insertFixup(z *intervalNode) {
// //
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313 // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313
// //
// 0. LEFT-ROTATE(T, x) // LEFT-ROTATE(T, x)
// 1.
// 2. y = x.right
// 3. x.right = y.left
// 4.
// 5. if y.left ≠ T.nil
// 6. y.left.p = x
// 7.
// 8. y.p = x.p
// 9.
// 10. if x.p == T.nil
// 11. T.root = y
// 12. else if x == x.p.left
// 13. x.p.left = y
// 14. else
// 15. x.p.right = y
// 16.
// 17. y.left = x
// 18. x.p = y
// //
// y = x.right
// x.right = y.left
//
// if y.left ≠ T.nil
// y.left.p = x
//
// y.p = x.p
//
// if x.p == T.nil
// T.root = y
// else if x == x.p.left
// x.p.left = y
// else
// x.p.right = y
//
// y.left = x
// x.p = y
func (ivt *intervalTree) rotateLeft(x *intervalNode) { func (ivt *intervalTree) rotateLeft(x *intervalNode) {
// rotateLeft x must have right child // rotateLeft x must have right child
if x.right == ivt.sentinel { if x.right == ivt.sentinel {
@@ -624,26 +621,25 @@ func (ivt *intervalTree) rotateLeft(x *intervalNode) {
// rotateRight moves x so it is right of its left child // rotateRight moves x so it is right of its left child
// //
// 0. RIGHT-ROTATE(T, x) // RIGHT-ROTATE(T, x)
// 1.
// 2. y = x.left
// 3. x.left = y.right
// 4.
// 5. if y.right ≠ T.nil
// 6. y.right.p = x
// 7.
// 8. y.p = x.p
// 9.
// 10. if x.p == T.nil
// 11. T.root = y
// 12. else if x == x.p.right
// 13. x.p.right = y
// 14. else
// 15. x.p.left = y
// 16.
// 17. y.right = x
// 18. x.p = y
// //
// y = x.left
// x.left = y.right
//
// if y.right ≠ T.nil
// y.right.p = x
//
// y.p = x.p
//
// if x.p == T.nil
// T.root = y
// else if x == x.p.right
// x.p.right = y
// else
// x.p.left = y
//
// y.right = x
// x.p = y
func (ivt *intervalTree) rotateRight(x *intervalNode) { func (ivt *intervalTree) rotateRight(x *intervalNode) {
// rotateRight x must have left child // rotateRight x must have left child
if x.left == ivt.sentinel { if x.left == ivt.sentinel {

View File

@@ -103,7 +103,7 @@ func resolveURL(ctx context.Context, lg *zap.Logger, u url.URL) (string, error)
) )
return "", err return "", err
} }
if host == "localhost" || net.ParseIP(host) != nil { if host == "localhost" {
return "", nil return "", nil
} }
for ctx.Err() == nil { for ctx.Err() == nil {

View File

@@ -37,9 +37,11 @@ type Changer struct {
// config is empty and initializes it with a copy of the incoming (=left) // config is empty and initializes it with a copy of the incoming (=left)
// majority config. That is, it transitions from // majority config. That is, it transitions from
// //
// (1 2 3)&&() // (1 2 3)&&()
//
// to // to
// (1 2 3)&&(1 2 3). //
// (1 2 3)&&(1 2 3).
// //
// The supplied changes are then applied to the incoming majority config, // The supplied changes are then applied to the incoming majority config,
// resulting in a joint configuration that in terms of the Raft thesis[1] // resulting in a joint configuration that in terms of the Raft thesis[1]

107
vendor/go.etcd.io/etcd/raft/v3/doc.go generated vendored
View File

@@ -25,46 +25,46 @@ A simple example application, _raftexample_, is also available to help illustrat
how to use this package in practice: how to use this package in practice:
https://github.com/etcd-io/etcd/tree/main/contrib/raftexample https://github.com/etcd-io/etcd/tree/main/contrib/raftexample
Usage # Usage
The primary object in raft is a Node. You either start a Node from scratch The primary object in raft is a Node. You either start a Node from scratch
using raft.StartNode or start a Node from some initial state using raft.RestartNode. using raft.StartNode or start a Node from some initial state using raft.RestartNode.
To start a node from scratch: To start a node from scratch:
storage := raft.NewMemoryStorage() storage := raft.NewMemoryStorage()
c := &Config{ c := &Config{
ID: 0x01, ID: 0x01,
ElectionTick: 10, ElectionTick: 10,
HeartbeatTick: 1, HeartbeatTick: 1,
Storage: storage, Storage: storage,
MaxSizePerMsg: 4096, MaxSizePerMsg: 4096,
MaxInflightMsgs: 256, MaxInflightMsgs: 256,
} }
n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
To restart a node from previous state: To restart a node from previous state:
storage := raft.NewMemoryStorage() storage := raft.NewMemoryStorage()
// recover the in-memory storage from persistent // recover the in-memory storage from persistent
// snapshot, state and entries. // snapshot, state and entries.
storage.ApplySnapshot(snapshot) storage.ApplySnapshot(snapshot)
storage.SetHardState(state) storage.SetHardState(state)
storage.Append(entries) storage.Append(entries)
c := &Config{ c := &Config{
ID: 0x01, ID: 0x01,
ElectionTick: 10, ElectionTick: 10,
HeartbeatTick: 1, HeartbeatTick: 1,
Storage: storage, Storage: storage,
MaxSizePerMsg: 4096, MaxSizePerMsg: 4096,
MaxInflightMsgs: 256, MaxInflightMsgs: 256,
} }
// restart raft without peer information. // restart raft without peer information.
// peer information is already included in the storage. // peer information is already included in the storage.
n := raft.RestartNode(c) n := raft.RestartNode(c)
Now that you are holding onto a Node you have a few responsibilities: Now that you are holding onto a Node you have a few responsibilities:
@@ -120,29 +120,29 @@ represented by an abstract "tick".
The total state machine handling loop will look something like this: The total state machine handling loop will look something like this:
for { for {
select { select {
case <-s.Ticker: case <-s.Ticker:
n.Tick() n.Tick()
case rd := <-s.Node.Ready(): case rd := <-s.Node.Ready():
saveToStorage(rd.State, rd.Entries, rd.Snapshot) saveToStorage(rd.State, rd.Entries, rd.Snapshot)
send(rd.Messages) send(rd.Messages)
if !raft.IsEmptySnap(rd.Snapshot) { if !raft.IsEmptySnap(rd.Snapshot) {
processSnapshot(rd.Snapshot) processSnapshot(rd.Snapshot)
} }
for _, entry := range rd.CommittedEntries { for _, entry := range rd.CommittedEntries {
process(entry) process(entry)
if entry.Type == raftpb.EntryConfChange { if entry.Type == raftpb.EntryConfChange {
var cc raftpb.ConfChange var cc raftpb.ConfChange
cc.Unmarshal(entry.Data) cc.Unmarshal(entry.Data)
s.Node.ApplyConfChange(cc) s.Node.ApplyConfChange(cc)
} }
} }
s.Node.Advance() s.Node.Advance()
case <-s.done: case <-s.done:
return return
} }
} }
To propose changes to the state machine from your node take your application To propose changes to the state machine from your node take your application
data, serialize it into a byte slice and call: data, serialize it into a byte slice and call:
@@ -169,7 +169,7 @@ given ID MUST be used only once even if the old node has been removed.
This means that for example IP addresses make poor node IDs since they This means that for example IP addresses make poor node IDs since they
may be reused. Node IDs must be non-zero. may be reused. Node IDs must be non-zero.
Implementation notes # Implementation notes
This implementation is up to date with the final Raft thesis This implementation is up to date with the final Raft thesis
(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our (https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our
@@ -194,7 +194,7 @@ cannot be removed any more since the cluster cannot make progress.
For this reason it is highly recommended to use three or more nodes in For this reason it is highly recommended to use three or more nodes in
every cluster. every cluster.
MessageType # MessageType
Package raft sends and receives message in Protocol Buffer format (defined Package raft sends and receives message in Protocol Buffer format (defined
in raftpb package). Each state (follower, candidate, leader) implements its in raftpb package). Each state (follower, candidate, leader) implements its
@@ -295,6 +295,5 @@ stale log entries:
that the follower that sent this 'MsgUnreachable' is not reachable, often that the follower that sent this 'MsgUnreachable' is not reachable, often
indicating 'MsgApp' is lost. When follower's progress state is replicate, indicating 'MsgApp' is lost. When follower's progress state is replicate,
the leader sets it back to probe. the leader sets it back to probe.
*/ */
package raft package raft

View File

@@ -42,7 +42,7 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
// rev isn't used in JWT, it is only used in simple token // rev isn't used in JWT, it is only used in simple token
var ( var (
username string username string
revision uint64 revision float64
) )
parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
@@ -74,10 +74,19 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
return nil, false return nil, false
} }
username = claims["username"].(string) username, ok = claims["username"].(string)
revision = uint64(claims["revision"].(float64)) if !ok {
t.lg.Warn("failed to obtain user claims from jwt token")
return nil, false
}
return &AuthInfo{Username: username, Revision: revision}, true revision, ok = claims["revision"].(float64)
if !ok {
t.lg.Warn("failed to obtain revision claims from jwt token")
return nil, false
}
return &AuthInfo{Username: username, Revision: uint64(revision)}, true
} }
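The token parser now guards both type assertions, so a token with missing or mistyped claims is rejected instead of panicking. A standalone illustration of the pattern (jwt refers to the golang-jwt package vendored by etcd; the claim names follow the diff):

func claimsFromToken(claims jwt.MapClaims) (username string, revision uint64, ok bool) {
	// A missing or mistyped claim makes the assertion return ok=false rather than panic.
	username, ok = claims["username"].(string)
	if !ok {
		return "", 0, false
	}
	rev, ok := claims["revision"].(float64) // JSON numbers decode as float64
	if !ok {
		return "", 0, false
	}
	return username, uint64(rev), true
}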
func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {

View File

@@ -76,8 +76,10 @@ func checkKeyInterval(
cachedPerms *unifiedRangePermissions, cachedPerms *unifiedRangePermissions,
key, rangeEnd []byte, key, rangeEnd []byte,
permtyp authpb.Permission_Type) bool { permtyp authpb.Permission_Type) bool {
if len(rangeEnd) == 1 && rangeEnd[0] == 0 { if isOpenEnded(rangeEnd) {
rangeEnd = nil rangeEnd = nil
// nil rangeEnd will be converted to []byte{}, the largest element of BytesAffineComparable,
// in NewBytesAffineInterval().
} }
ivl := adt.NewBytesAffineInterval(key, rangeEnd) ivl := adt.NewBytesAffineInterval(key, rangeEnd)
@@ -153,3 +155,51 @@ type unifiedRangePermissions struct {
readPerms adt.IntervalTree readPerms adt.IntervalTree
writePerms adt.IntervalTree writePerms adt.IntervalTree
} }
// Constraints related to key range
// Assumptions:
// a1. key must be non-nil
// a2. []byte{} (in the case of string, "") is not a valid key of etcd
// For representing an open-ended range, BytesAffineComparable uses []byte{} as the largest element.
// a3. []byte{0x00} is the minimum valid etcd key
//
// Based on the above assumptions, key and rangeEnd must follow below rules:
// b1. for representing a single key point, rangeEnd should be nil or zero length byte array (in the case of string, "")
// Rule a2 guarantees that (X, []byte{}) for any X is not a valid range. So such ranges can be used for representing
// a single key permission.
//
// b2. key range with upper limit, like (X, Y), larger or equal to X and smaller than Y
//
// b3. key range with open-ended, like (X, <open ended>), is represented like (X, []byte{0x00})
// Because of rule a3, if we have (X, []byte{0x00}), such a range represents an empty range and makes no sense to have
// such a permission. So we use []byte{0x00} for representing an open-ended permission.
// Note that rangeEnd with []byte{0x00} will be converted into []byte{} before inserted into the interval tree
// (rule a2 ensures that this is the largest element).
// Special range like key = []byte{0x00} and rangeEnd = []byte{0x00} is treated as a range which matches with all keys.
//
// Treating a range whose rangeEnd is []byte{0x00} as open-ended comes from the rules of the Range() and Watch() APIs.
func isOpenEnded(rangeEnd []byte) bool { // check rule b3
return len(rangeEnd) == 1 && rangeEnd[0] == 0
}
func isValidPermissionRange(key, rangeEnd []byte) bool {
if len(key) == 0 {
return false
}
if rangeEnd == nil || len(rangeEnd) == 0 { // ensure rule b1
return true
}
begin := adt.BytesAffineComparable(key)
end := adt.BytesAffineComparable(rangeEnd)
if begin.Compare(end) == -1 { // rule b2
return true
}
if isOpenEnded(rangeEnd) {
return true
}
return false
}
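The new helpers encode the key-range rules spelled out above. A few concrete cases for illustration (package-internal, since the helpers are unexported; the results follow rules b1-b3):

func permissionRangeExamples() {
	_ = isValidPermissionRange([]byte("foo"), nil)        // true:  single-key permission (rule b1)
	_ = isValidPermissionRange([]byte("a"), []byte("b"))  // true:  bounded range [a, b) (rule b2)
	_ = isValidPermissionRange([]byte("a"), []byte{0x00}) // true:  open-ended range starting at "a" (rule b3)
	_ = isValidPermissionRange([]byte{}, []byte("b"))     // false: an empty key is never valid
	_ = isValidPermissionRange([]byte("b"), []byte("a"))  // false: rangeEnd sorts before key
}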

View File

@@ -479,7 +479,8 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
var password []byte var password []byte
var err error var err error
if !user.Options.NoPassword { // Backward compatible with old versions of etcd where user options may be nil
if user.Options == nil || !user.Options.NoPassword {
password, err = as.selectPassword(r.Password, r.HashedPassword) password, err = as.selectPassword(r.Password, r.HashedPassword)
if err != nil { if err != nil {
return nil, ErrNoPasswordUser return nil, ErrNoPasswordUser
@@ -791,6 +792,9 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
if r.Perm == nil { if r.Perm == nil {
return nil, ErrPermissionNotGiven return nil, ErrPermissionNotGiven
} }
if !isValidPermissionRange(r.Perm.Key, r.Perm.RangeEnd) {
return nil, ErrInvalidAuthMgmt
}
tx := as.be.BatchTx() tx := as.be.BatchTx()
tx.LockInsideApply() tx.LockInsideApply()

View File

@@ -15,6 +15,7 @@
package embed package embed
import ( import (
"crypto/tls"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math" "math"
@@ -204,12 +205,12 @@ type Config struct {
// streams that each client can open at a time. // streams that each client can open at a time.
MaxConcurrentStreams uint32 `json:"max-concurrent-streams"` MaxConcurrentStreams uint32 `json:"max-concurrent-streams"`
LPUrls, LCUrls []url.URL ListenPeerUrls, ListenClientUrls, ListenClientHttpUrls []url.URL
APUrls, ACUrls []url.URL AdvertisePeerUrls, AdvertiseClientUrls []url.URL
ClientTLSInfo transport.TLSInfo ClientTLSInfo transport.TLSInfo
ClientAutoTLS bool ClientAutoTLS bool
PeerTLSInfo transport.TLSInfo PeerTLSInfo transport.TLSInfo
PeerAutoTLS bool PeerAutoTLS bool
// SelfSignedCertValidity specifies the validity period of the client and peer certificates // SelfSignedCertValidity specifies the validity period of the client and peer certificates
// that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS, // that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS,
// the unit is year, and the default is 1 // the unit is year, and the default is 1
@@ -220,6 +221,11 @@ type Config struct {
// Note that cipher suites are prioritized in the given order. // Note that cipher suites are prioritized in the given order.
CipherSuites []string `json:"cipher-suites"` CipherSuites []string `json:"cipher-suites"`
// TlsMinVersion is the minimum accepted TLS version between client/server and peers.
TlsMinVersion string `json:"tls-min-version"`
// TlsMaxVersion is the maximum accepted TLS version between client/server and peers.
TlsMaxVersion string `json:"tls-max-version"`
ClusterState string `json:"initial-cluster-state"` ClusterState string `json:"initial-cluster-state"`
DNSCluster string `json:"discovery-srv"` DNSCluster string `json:"discovery-srv"`
DNSClusterServiceName string `json:"discovery-srv-name"` DNSClusterServiceName string `json:"discovery-srv-name"`
@@ -417,10 +423,11 @@ type configYAML struct {
// configJSON has file options that are translated into Config options // configJSON has file options that are translated into Config options
type configJSON struct { type configJSON struct {
LPUrlsJSON string `json:"listen-peer-urls"` ListenPeerUrls string `json:"listen-peer-urls"`
LCUrlsJSON string `json:"listen-client-urls"` ListenClientUrls string `json:"listen-client-urls"`
APUrlsJSON string `json:"initial-advertise-peer-urls"` ListenClientHttpUrls string `json:"listen-client-http-urls"`
ACUrlsJSON string `json:"advertise-client-urls"` AdvertisePeerUrls string `json:"initial-advertise-peer-urls"`
AdvertiseClientUrls string `json:"advertise-client-urls"`
CORSJSON string `json:"cors"` CORSJSON string `json:"cors"`
HostWhitelistJSON string `json:"host-whitelist"` HostWhitelistJSON string `json:"host-whitelist"`
@@ -469,10 +476,10 @@ func NewConfig() *Config {
ElectionMs: 1000, ElectionMs: 1000,
InitialElectionTickAdvance: true, InitialElectionTickAdvance: true,
LPUrls: []url.URL{*lpurl}, ListenPeerUrls: []url.URL{*lpurl},
LCUrls: []url.URL{*lcurl}, ListenClientUrls: []url.URL{*lcurl},
APUrls: []url.URL{*apurl}, AdvertisePeerUrls: []url.URL{*apurl},
ACUrls: []url.URL{*acurl}, AdvertiseClientUrls: []url.URL{*acurl},
ClusterState: ClusterStateFlagNew, ClusterState: ClusterStateFlagNew,
InitialClusterToken: "etcd-cluster", InitialClusterToken: "etcd-cluster",
@@ -533,40 +540,49 @@ func (cfg *configYAML) configFromFile(path string) error {
return err return err
} }
if cfg.LPUrlsJSON != "" { if cfg.configJSON.ListenPeerUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ",")) u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenPeerUrls, ","))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err) fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err)
os.Exit(1) os.Exit(1)
} }
cfg.LPUrls = []url.URL(u) cfg.Config.ListenPeerUrls = u
} }
if cfg.LCUrlsJSON != "" { if cfg.configJSON.ListenClientUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ",")) u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientUrls, ","))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err) fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err)
os.Exit(1) os.Exit(1)
} }
cfg.LCUrls = []url.URL(u) cfg.Config.ListenClientUrls = u
} }
if cfg.APUrlsJSON != "" { if cfg.configJSON.ListenClientHttpUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ",")) u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientHttpUrls, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-http-urls: %v\n", err)
os.Exit(1)
}
cfg.Config.ListenClientHttpUrls = u
}
if cfg.configJSON.AdvertisePeerUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertisePeerUrls, ","))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err) fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err)
os.Exit(1) os.Exit(1)
} }
cfg.APUrls = []url.URL(u) cfg.Config.AdvertisePeerUrls = u
} }
if cfg.ACUrlsJSON != "" { if cfg.configJSON.AdvertiseClientUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ",")) u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertiseClientUrls, ","))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err) fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err)
os.Exit(1) os.Exit(1)
} }
cfg.ACUrls = []url.URL(u) cfg.Config.AdvertiseClientUrls = u
} }
if cfg.ListenMetricsUrlsJSON != "" { if cfg.ListenMetricsUrlsJSON != "" {
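The renamed fields and the new listen-client-http-urls option also surface on the programmatic embed.Config. A minimal sketch, assuming the go.etcd.io/etcd/server/v3/embed import path, a local data directory, and a placeholder port 2382 for the dedicated HTTP listener:

package main

import (
    "fmt"
    "net/url"

    "go.etcd.io/etcd/server/v3/embed"
)

func main() {
    cfg := embed.NewConfig()
    cfg.Dir = "default.etcd"

    // gRPC (and, without the next field, HTTP) client traffic.
    clientURL, _ := url.Parse("http://127.0.0.1:2379")
    cfg.ListenClientUrls = []url.URL{*clientURL}
    cfg.AdvertiseClientUrls = []url.URL{*clientURL}

    // Optional dedicated HTTP listener; when set, gRPC and HTTP
    // traffic are served from separate listeners.
    httpURL, _ := url.Parse("http://127.0.0.1:2382")
    cfg.ListenClientHttpUrls = []url.URL{*httpURL}

    // Should pass with the remaining defaults in a typical environment.
    fmt.Println(cfg.Validate())
}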
@@ -628,26 +644,43 @@ func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
return nil return nil
} }
func updateMinMaxVersions(info *transport.TLSInfo, min, max string) {
// Validate() has been called to check the user input, so it should never fail.
var err error
if info.MinVersion, err = tlsutil.GetTLSVersion(min); err != nil {
panic(err)
}
if info.MaxVersion, err = tlsutil.GetTLSVersion(max); err != nil {
panic(err)
}
}
// Validate ensures that '*embed.Config' fields are properly configured. // Validate ensures that '*embed.Config' fields are properly configured.
func (cfg *Config) Validate() error { func (cfg *Config) Validate() error {
if err := cfg.setupLogging(); err != nil { if err := cfg.setupLogging(); err != nil {
return err return err
} }
if err := checkBindURLs(cfg.LPUrls); err != nil { if err := checkBindURLs(cfg.ListenPeerUrls); err != nil {
return err return err
} }
if err := checkBindURLs(cfg.LCUrls); err != nil { if err := checkBindURLs(cfg.ListenClientUrls); err != nil {
return err return err
} }
if err := checkBindURLs(cfg.ListenClientHttpUrls); err != nil {
return err
}
if len(cfg.ListenClientHttpUrls) == 0 {
cfg.logger.Warn("Running http and grpc server on single port. This is not recommended for production.")
}
if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil { if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
return err return err
} }
if err := checkHostURLs(cfg.APUrls); err != nil { if err := checkHostURLs(cfg.AdvertisePeerUrls); err != nil {
addrs := cfg.getAPURLs() addrs := cfg.getAdvertisePeerUrls()
return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
} }
if err := checkHostURLs(cfg.ACUrls); err != nil { if err := checkHostURLs(cfg.AdvertiseClientUrls); err != nil {
addrs := cfg.getACURLs() addrs := cfg.getAdvertiseClientUrls()
return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
} }
// Check if conflicting flags are passed. // Check if conflicting flags are passed.
@@ -680,7 +713,7 @@ func (cfg *Config) Validate() error {
} }
// check this last since proxying in etcdmain may make this OK // check this last since proxying in etcdmain may make this OK
if cfg.LCUrls != nil && cfg.ACUrls == nil { if cfg.ListenClientUrls != nil && cfg.AdvertiseClientUrls == nil {
return ErrUnsetAdvertiseClientURLsFlag return ErrUnsetAdvertiseClientURLsFlag
} }
@@ -703,6 +736,25 @@ func (cfg *Config) Validate() error {
return fmt.Errorf("--experimental-compact-hash-check-time must be >0 (set to %v)", cfg.ExperimentalCompactHashCheckTime) return fmt.Errorf("--experimental-compact-hash-check-time must be >0 (set to %v)", cfg.ExperimentalCompactHashCheckTime)
} }
minVersion, err := tlsutil.GetTLSVersion(cfg.TlsMinVersion)
if err != nil {
return err
}
maxVersion, err := tlsutil.GetTLSVersion(cfg.TlsMaxVersion)
if err != nil {
return err
}
// maxVersion == 0 means that Go selects the highest available version.
if maxVersion != 0 && minVersion > maxVersion {
return fmt.Errorf("min version (%s) is greater than max version (%s)", cfg.TlsMinVersion, cfg.TlsMaxVersion)
}
// Check if user attempted to configure ciphers for TLS1.3 only: Go does not support that currently.
if minVersion == tls.VersionTLS13 && len(cfg.CipherSuites) > 0 {
return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled")
}
return nil return nil
} }
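For the new TLS version knobs, a hedged sketch of how the Validate() constraints above behave; as I read the tlsutil.GetTLSVersion helper referenced here, the accepted strings are the empty string, "TLS1.2", and "TLS1.3", and the expected error messages in the comments are assumptions based on this diff:

package main

import (
    "fmt"

    "go.etcd.io/etcd/server/v3/embed"
)

func main() {
    // A minimum version greater than the maximum is rejected.
    cfg := embed.NewConfig()
    cfg.TlsMinVersion = "TLS1.3"
    cfg.TlsMaxVersion = "TLS1.2"
    fmt.Println(cfg.Validate()) // expected: min version (TLS1.3) is greater than max version (TLS1.2)

    // Cipher suites cannot be pinned when only TLS1.3 is allowed.
    cfg = embed.NewConfig()
    cfg.TlsMinVersion = "TLS1.3"
    cfg.CipherSuites = []string{"TLS_AES_128_GCM_SHA256"}
    fmt.Println(cfg.Validate()) // expected: cipher suites cannot be configured when only TLS1.3 is enabled
}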
@@ -714,7 +766,7 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok
urlsmap = types.URLsMap{} urlsmap = types.URLsMap{}
// If using discovery, generate a temporary cluster based on // If using discovery, generate a temporary cluster based on
// self's advertised peer URLs // self's advertised peer URLs
urlsmap[cfg.Name] = cfg.APUrls urlsmap[cfg.Name] = cfg.AdvertisePeerUrls
token = cfg.Durl token = cfg.Durl
case cfg.DNSCluster != "": case cfg.DNSCluster != "":
@@ -768,7 +820,7 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
// Use both etcd-server-ssl and etcd-server for discovery. // Use both etcd-server-ssl and etcd-server for discovery.
// Combine the results if both are available. // Combine the results if both are available.
clusterStrs, cerr = getCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls) clusterStrs, cerr = getCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
if cerr != nil { if cerr != nil {
clusterStrs = make([]string, 0) clusterStrs = make([]string, 0)
} }
@@ -778,12 +830,12 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix), zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix),
zap.String("server-name", cfg.Name), zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster), zap.String("discovery-srv", cfg.DNSCluster),
zap.Strings("advertise-peer-urls", cfg.getAPURLs()), zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerUrls()),
zap.Strings("found-cluster", clusterStrs), zap.Strings("found-cluster", clusterStrs),
zap.Error(cerr), zap.Error(cerr),
) )
defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls) defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
if httpCerr == nil { if httpCerr == nil {
clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...) clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
} }
@@ -793,7 +845,7 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
zap.String("service-name", "etcd-server"+serviceNameSuffix), zap.String("service-name", "etcd-server"+serviceNameSuffix),
zap.String("server-name", cfg.Name), zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster), zap.String("discovery-srv", cfg.DNSCluster),
zap.Strings("advertise-peer-urls", cfg.getAPURLs()), zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerUrls()),
zap.Strings("found-cluster", clusterStrs), zap.Strings("found-cluster", clusterStrs),
zap.Error(httpCerr), zap.Error(httpCerr),
) )
@@ -802,15 +854,15 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
} }
func (cfg Config) InitialClusterFromName(name string) (ret string) { func (cfg Config) InitialClusterFromName(name string) (ret string) {
if len(cfg.APUrls) == 0 { if len(cfg.AdvertisePeerUrls) == 0 {
return "" return ""
} }
n := name n := name
if name == "" { if name == "" {
n = DefaultName n = DefaultName
} }
for i := range cfg.APUrls { for i := range cfg.AdvertisePeerUrls {
ret = ret + "," + n + "=" + cfg.APUrls[i].String() ret = ret + "," + n + "=" + cfg.AdvertisePeerUrls[i].String()
} }
return ret[1:] return ret[1:]
} }
@@ -826,11 +878,11 @@ func (cfg Config) V2DeprecationEffective() config.V2DeprecationEnum {
} }
func (cfg Config) defaultPeerHost() bool { func (cfg Config) defaultPeerHost() bool {
return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs return len(cfg.AdvertisePeerUrls) == 1 && cfg.AdvertisePeerUrls[0].String() == DefaultInitialAdvertisePeerURLs
} }
func (cfg Config) defaultClientHost() bool { func (cfg Config) defaultClientHost() bool {
return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs return len(cfg.AdvertiseClientUrls) == 1 && cfg.AdvertiseClientUrls[0].String() == DefaultAdvertiseClientURLs
} }
func (cfg *Config) ClientSelfCert() (err error) { func (cfg *Config) ClientSelfCert() (err error) {
@@ -841,9 +893,12 @@ func (cfg *Config) ClientSelfCert() (err error) {
cfg.logger.Warn("ignoring client auto TLS since certs given") cfg.logger.Warn("ignoring client auto TLS since certs given")
return nil return nil
} }
chosts := make([]string, len(cfg.LCUrls)) chosts := make([]string, 0, len(cfg.ListenClientUrls)+len(cfg.ListenClientHttpUrls))
for i, u := range cfg.LCUrls { for _, u := range cfg.ListenClientUrls {
chosts[i] = u.Host chosts = append(chosts, u.Host)
}
for _, u := range cfg.ListenClientHttpUrls {
chosts = append(chosts, u.Host)
} }
cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts, cfg.SelfSignedCertValidity) cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts, cfg.SelfSignedCertValidity)
if err != nil { if err != nil {
@@ -860,8 +915,8 @@ func (cfg *Config) PeerSelfCert() (err error) {
cfg.logger.Warn("ignoring peer auto TLS since certs given") cfg.logger.Warn("ignoring peer auto TLS since certs given")
return nil return nil
} }
phosts := make([]string, len(cfg.LPUrls)) phosts := make([]string, len(cfg.ListenPeerUrls))
for i, u := range cfg.LPUrls { for i, u := range cfg.ListenPeerUrls {
phosts[i] = u.Host phosts[i] = u.Host
} }
cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts, cfg.SelfSignedCertValidity) cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts, cfg.SelfSignedCertValidity)
@@ -889,9 +944,9 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s
} }
used := false used := false
pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port() pip, pport := cfg.ListenPeerUrls[0].Hostname(), cfg.ListenPeerUrls[0].Port()
if cfg.defaultPeerHost() && pip == "0.0.0.0" { if cfg.defaultPeerHost() && pip == "0.0.0.0" {
cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} cfg.AdvertisePeerUrls[0] = url.URL{Scheme: cfg.AdvertisePeerUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
used = true used = true
} }
// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc') // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
@@ -899,9 +954,9 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
} }
cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port() cip, cport := cfg.ListenClientUrls[0].Hostname(), cfg.ListenClientUrls[0].Port()
if cfg.defaultClientHost() && cip == "0.0.0.0" { if cfg.defaultClientHost() && cip == "0.0.0.0" {
cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} cfg.AdvertiseClientUrls[0] = url.URL{Scheme: cfg.AdvertiseClientUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
used = true used = true
} }
dhost := defaultHostname dhost := defaultHostname
@@ -946,34 +1001,42 @@ func checkHostURLs(urls []url.URL) error {
return nil return nil
} }
func (cfg *Config) getAPURLs() (ss []string) { func (cfg *Config) getAdvertisePeerUrls() (ss []string) {
ss = make([]string, len(cfg.APUrls)) ss = make([]string, len(cfg.AdvertisePeerUrls))
for i := range cfg.APUrls { for i := range cfg.AdvertisePeerUrls {
ss[i] = cfg.APUrls[i].String() ss[i] = cfg.AdvertisePeerUrls[i].String()
} }
return ss return ss
} }
func (cfg *Config) getLPURLs() (ss []string) { func (cfg *Config) getListenPeerUrls() (ss []string) {
ss = make([]string, len(cfg.LPUrls)) ss = make([]string, len(cfg.ListenPeerUrls))
for i := range cfg.LPUrls { for i := range cfg.ListenPeerUrls {
ss[i] = cfg.LPUrls[i].String() ss[i] = cfg.ListenPeerUrls[i].String()
} }
return ss return ss
} }
func (cfg *Config) getACURLs() (ss []string) { func (cfg *Config) getAdvertiseClientUrls() (ss []string) {
ss = make([]string, len(cfg.ACUrls)) ss = make([]string, len(cfg.AdvertiseClientUrls))
for i := range cfg.ACUrls { for i := range cfg.AdvertiseClientUrls {
ss[i] = cfg.ACUrls[i].String() ss[i] = cfg.AdvertiseClientUrls[i].String()
} }
return ss return ss
} }
func (cfg *Config) getLCURLs() (ss []string) { func (cfg *Config) getListenClientUrls() (ss []string) {
ss = make([]string, len(cfg.LCUrls)) ss = make([]string, len(cfg.ListenClientUrls))
for i := range cfg.LCUrls { for i := range cfg.ListenClientUrls {
ss[i] = cfg.LCUrls[i].String() ss[i] = cfg.ListenClientUrls[i].String()
}
return ss
}
func (cfg *Config) getListenClientHttpUrls() (ss []string) {
ss = make([]string, len(cfg.ListenClientHttpUrls))
for i := range cfg.ListenClientHttpUrls {
ss[i] = cfg.ListenClientHttpUrls[i].String()
} }
return ss return ss
} }

View File

@@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
defaultLog "log" defaultLog "log"
"math"
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
@@ -32,6 +33,7 @@ import (
"go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/client/v3/credentials"
"go.etcd.io/etcd/pkg/v3/debugutil" "go.etcd.io/etcd/pkg/v3/debugutil"
runtimeutil "go.etcd.io/etcd/pkg/v3/runtime" runtimeutil "go.etcd.io/etcd/pkg/v3/runtime"
"go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/config"
@@ -48,6 +50,7 @@ import (
"github.com/soheilhy/cmux" "github.com/soheilhy/cmux"
"go.uber.org/zap" "go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/keepalive" "google.golang.org/grpc/keepalive"
) )
@@ -123,7 +126,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
} }
e.cfg.logger.Info( e.cfg.logger.Info(
"configuring peer listeners", "configuring peer listeners",
zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), zap.Strings("listen-peer-urls", e.cfg.getListenPeerUrls()),
) )
if e.Peers, err = configurePeerListeners(cfg); err != nil { if e.Peers, err = configurePeerListeners(cfg); err != nil {
return e, err return e, err
@@ -131,7 +134,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
e.cfg.logger.Info( e.cfg.logger.Info(
"configuring client listeners", "configuring client listeners",
zap.Strings("listen-client-urls", e.cfg.getLCURLs()), zap.Strings("listen-client-urls", e.cfg.getListenClientUrls()),
) )
if e.sctxs, err = configureClientListeners(cfg); err != nil { if e.sctxs, err = configureClientListeners(cfg); err != nil {
return e, err return e, err
@@ -167,8 +170,8 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
srvcfg := config.ServerConfig{ srvcfg := config.ServerConfig{
Name: cfg.Name, Name: cfg.Name,
ClientURLs: cfg.ACUrls, ClientURLs: cfg.AdvertiseClientUrls,
PeerURLs: cfg.APUrls, PeerURLs: cfg.AdvertisePeerUrls,
DataDir: cfg.Dir, DataDir: cfg.Dir,
DedicatedWALDir: cfg.WalDir, DedicatedWALDir: cfg.WalDir,
SnapshotCount: cfg.SnapshotCount, SnapshotCount: cfg.SnapshotCount,
@@ -275,10 +278,10 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
e.cfg.logger.Info( e.cfg.logger.Info(
"now serving peer/client/metrics", "now serving peer/client/metrics",
zap.String("local-member-id", e.Server.ID().String()), zap.String("local-member-id", e.Server.ID().String()),
zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()), zap.Strings("initial-advertise-peer-urls", e.cfg.getAdvertisePeerUrls()),
zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), zap.Strings("listen-peer-urls", e.cfg.getListenPeerUrls()),
zap.Strings("advertise-client-urls", e.cfg.getACURLs()), zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientUrls()),
zap.Strings("listen-client-urls", e.cfg.getLCURLs()), zap.Strings("listen-client-urls", e.cfg.getListenClientUrls()),
zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()), zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
) )
serving = true serving = true
@@ -326,10 +329,10 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized
zap.Uint("max-wals", sc.MaxWALFiles), zap.Uint("max-wals", sc.MaxWALFiles),
zap.Uint("max-snapshots", sc.MaxSnapFiles), zap.Uint("max-snapshots", sc.MaxSnapFiles),
zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries), zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()), zap.Strings("initial-advertise-peer-urls", ec.getAdvertisePeerUrls()),
zap.Strings("listen-peer-urls", ec.getLPURLs()), zap.Strings("listen-peer-urls", ec.getListenPeerUrls()),
zap.Strings("advertise-client-urls", ec.getACURLs()), zap.Strings("advertise-client-urls", ec.getAdvertiseClientUrls()),
zap.Strings("listen-client-urls", ec.getLCURLs()), zap.Strings("listen-client-urls", ec.getListenClientUrls()),
zap.Strings("listen-metrics-urls", ec.getMetricsURLs()), zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
zap.Strings("cors", cors), zap.Strings("cors", cors),
zap.Strings("host-whitelist", hss), zap.Strings("host-whitelist", hss),
@@ -366,8 +369,8 @@ func (e *Etcd) Close() {
fields := []zap.Field{ fields := []zap.Field{
zap.String("name", e.cfg.Name), zap.String("name", e.cfg.Name),
zap.String("data-dir", e.cfg.Dir), zap.String("data-dir", e.cfg.Dir),
zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()), zap.Strings("advertise-peer-urls", e.cfg.getAdvertisePeerUrls()),
zap.Strings("advertise-client-urls", e.cfg.getACURLs()), zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientUrls()),
} }
lg := e.GetLogger() lg := e.GetLogger()
lg.Info("closing etcd server", fields...) lg.Info("closing etcd server", fields...)
@@ -437,11 +440,16 @@ func (e *Etcd) Close() {
func stopServers(ctx context.Context, ss *servers) { func stopServers(ctx context.Context, ss *servers) {
// first, close the http.Server // first, close the http.Server
ss.http.Shutdown(ctx) if ss.http != nil {
// do not grpc.Server.GracefulStop with TLS enabled etcd server ss.http.Shutdown(ctx)
}
if ss.grpc == nil {
return
}
// do not grpc.Server.GracefulStop when grpc runs under http server
// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531 // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
// and https://github.com/etcd-io/etcd/issues/8916 // and https://github.com/etcd-io/etcd/issues/8916
if ss.secure { if ss.secure && ss.http != nil {
ss.grpc.Stop() ss.grpc.Stop()
return return
} }
@@ -480,6 +488,9 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
if err = cfg.PeerSelfCert(); err != nil { if err = cfg.PeerSelfCert(); err != nil {
cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err)) cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err))
} }
updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
if !cfg.PeerTLSInfo.Empty() { if !cfg.PeerTLSInfo.Empty() {
cfg.logger.Info( cfg.logger.Info(
"starting with peer TLS", "starting with peer TLS",
@@ -488,7 +499,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
) )
} }
peers = make([]*peerListener, len(cfg.LPUrls)) peers = make([]*peerListener, len(cfg.ListenPeerUrls))
defer func() { defer func() {
if err == nil { if err == nil {
return return
@@ -497,7 +508,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
if peers[i] != nil && peers[i].close != nil { if peers[i] != nil && peers[i].close != nil {
cfg.logger.Warn( cfg.logger.Warn(
"closing peer listener", "closing peer listener",
zap.String("address", cfg.LPUrls[i].String()), zap.String("address", cfg.ListenPeerUrls[i].String()),
zap.Error(err), zap.Error(err),
) )
ctx, cancel := context.WithTimeout(context.Background(), time.Second) ctx, cancel := context.WithTimeout(context.Background(), time.Second)
@@ -507,7 +518,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
} }
}() }()
for i, u := range cfg.LPUrls { for i, u := range cfg.ListenPeerUrls {
if u.Scheme == "http" { if u.Scheme == "http" {
if !cfg.PeerTLSInfo.Empty() { if !cfg.PeerTLSInfo.Empty() {
cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String())) cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String()))
@@ -600,13 +611,15 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if err = cfg.ClientSelfCert(); err != nil { if err = cfg.ClientSelfCert(); err != nil {
cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err)) cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err))
} }
updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
if cfg.EnablePprof { if cfg.EnablePprof {
cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf)) cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
} }
sctxs = make(map[string]*serveCtx) sctxs = make(map[string]*serveCtx)
for _, u := range cfg.LCUrls { for _, u := range append(cfg.ListenClientUrls, cfg.ListenClientHttpUrls...) {
sctx := newServeCtx(cfg.logger)
if u.Scheme == "http" || u.Scheme == "unix" { if u.Scheme == "http" || u.Scheme == "unix" {
if !cfg.ClientTLSInfo.Empty() { if !cfg.ClientTLSInfo.Empty() {
cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String())) cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
@@ -618,24 +631,41 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() { if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String()) return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
} }
}
network := "tcp" for _, u := range cfg.ListenClientUrls {
addr := u.Host addr, secure, network := resolveUrl(u)
if u.Scheme == "unix" || u.Scheme == "unixs" { sctx := sctxs[addr]
network = "unix" if sctx == nil {
addr = u.Host + u.Path sctx = newServeCtx(cfg.logger)
sctxs[addr] = sctx
} }
sctx.secure = sctx.secure || secure
sctx.insecure = sctx.insecure || !secure
sctx.scheme = u.Scheme
sctx.addr = addr
sctx.network = network sctx.network = network
}
for _, u := range cfg.ListenClientHttpUrls {
addr, secure, network := resolveUrl(u)
sctx.secure = u.Scheme == "https" || u.Scheme == "unixs" sctx := sctxs[addr]
sctx.insecure = !sctx.secure if sctx == nil {
if oldctx := sctxs[addr]; oldctx != nil { sctx = newServeCtx(cfg.logger)
oldctx.secure = oldctx.secure || sctx.secure sctxs[addr] = sctx
oldctx.insecure = oldctx.insecure || sctx.insecure } else if !sctx.httpOnly {
continue return nil, fmt.Errorf("cannot bind both --client-listen-urls and --client-listen-http-urls on the same url %s", u.String())
} }
sctx.secure = sctx.secure || secure
sctx.insecure = sctx.insecure || !secure
sctx.scheme = u.Scheme
sctx.addr = addr
sctx.network = network
sctx.httpOnly = true
}
if sctx.l, err = transport.NewListenerWithOpts(addr, u.Scheme, for _, sctx := range sctxs {
if sctx.l, err = transport.NewListenerWithOpts(sctx.addr, sctx.scheme,
transport.WithSocketOpts(&cfg.SocketOpts), transport.WithSocketOpts(&cfg.SocketOpts),
transport.WithSkipTLSInfoCheck(true), transport.WithSkipTLSInfoCheck(true),
); err != nil { ); err != nil {
@@ -643,7 +673,6 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
} }
// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
// hosts that disable ipv6. So, use the address given by the user. // hosts that disable ipv6. So, use the address given by the user.
sctx.addr = addr
if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
if fdLimit <= reservedInternalFDNum { if fdLimit <= reservedInternalFDNum {
@@ -656,17 +685,17 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum)) sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
} }
defer func(u url.URL) { defer func(sctx *serveCtx) {
if err == nil { if err == nil || sctx.l == nil {
return return
} }
sctx.l.Close() sctx.l.Close()
cfg.logger.Warn( cfg.logger.Warn(
"closing peer listener", "closing peer listener",
zap.String("address", u.Host), zap.String("address", sctx.addr),
zap.Error(err), zap.Error(err),
) )
}(u) }(sctx)
for k := range cfg.UserHandlers { for k := range cfg.UserHandlers {
sctx.userHandlers[k] = cfg.UserHandlers[k] sctx.userHandlers[k] = cfg.UserHandlers[k]
} }
@@ -677,11 +706,21 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if cfg.LogLevel == "debug" { if cfg.LogLevel == "debug" {
sctx.registerTrace() sctx.registerTrace()
} }
sctxs[addr] = sctx
} }
return sctxs, nil return sctxs, nil
} }
func resolveUrl(u url.URL) (addr string, secure bool, network string) {
addr = u.Host
network = "tcp"
if u.Scheme == "unix" || u.Scheme == "unixs" {
addr = u.Host + u.Path
network = "unix"
}
secure = u.Scheme == "https" || u.Scheme == "unixs"
return addr, secure, network
}
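Since resolveUrl is unexported, the sketch below re-implements its mapping outside the embed package purely for illustration; the URLs are placeholders.

package main

import (
    "fmt"
    "net/url"
)

// resolveListenURL mirrors the unexported embed helper above:
// unix/unixs URLs keep host+path and use the "unix" network,
// https/unixs are marked secure, everything else defaults to TCP.
func resolveListenURL(u url.URL) (addr string, secure bool, network string) {
    addr, network = u.Host, "tcp"
    if u.Scheme == "unix" || u.Scheme == "unixs" {
        addr = u.Host + u.Path
        network = "unix"
    }
    secure = u.Scheme == "https" || u.Scheme == "unixs"
    return addr, secure, network
}

func main() {
    for _, raw := range []string{
        "http://127.0.0.1:2379",
        "https://127.0.0.1:2379",
        "unix://localhost:20000",
        "unixs:///tmp/etcd.sock",
    } {
        u, err := url.Parse(raw)
        if err != nil {
            panic(err)
        }
        addr, secure, network := resolveListenURL(*u)
        fmt.Printf("%-26s addr=%-20s secure=%-5v network=%s\n", raw, addr, secure, network)
    }
}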
func (e *Etcd) serveClients() (err error) { func (e *Etcd) serveClients() (err error) {
if !e.cfg.ClientTLSInfo.Empty() { if !e.cfg.ClientTLSInfo.Empty() {
e.cfg.logger.Info( e.cfg.logger.Info(
@@ -727,15 +766,69 @@ func (e *Etcd) serveClients() (err error) {
})) }))
} }
splitHttp := false
for _, sctx := range e.sctxs {
if sctx.httpOnly {
splitHttp = true
}
}
// start client servers in each goroutine // start client servers in each goroutine
for _, sctx := range e.sctxs { for _, sctx := range e.sctxs {
go func(s *serveCtx) { go func(s *serveCtx) {
e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...)) e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, e.grpcGatewayDial(splitHttp), splitHttp, gopts...))
}(sctx) }(sctx)
} }
return nil return nil
} }
func (e *Etcd) grpcGatewayDial(splitHttp bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) {
if !e.cfg.EnableGRPCGateway {
return nil
}
sctx := e.pickGrpcGatewayServeContext(splitHttp)
addr := sctx.addr
if network := sctx.network; network == "unix" {
// explicitly define unix network for gRPC socket support
addr = fmt.Sprintf("%s://%s", network, addr)
}
opts := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32))}
if sctx.secure {
tlscfg, tlsErr := e.cfg.ClientTLSInfo.ServerConfig()
if tlsErr != nil {
return func(ctx context.Context) (*grpc.ClientConn, error) {
return nil, tlsErr
}
}
dtls := tlscfg.Clone()
// trust local server
dtls.InsecureSkipVerify = true
bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
opts = append(opts, grpc.WithTransportCredentials(bundle.TransportCredentials()))
} else {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
return func(ctx context.Context) (*grpc.ClientConn, error) {
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil {
sctx.lg.Error("grpc gateway failed to dial", zap.String("addr", addr), zap.Error(err))
return nil, err
}
return conn, err
}
}
func (e *Etcd) pickGrpcGatewayServeContext(splitHttp bool) *serveCtx {
for _, sctx := range e.sctxs {
if !splitHttp || !sctx.httpOnly {
return sctx
}
}
panic("Expect at least one context able to serve grpc")
}
func (e *Etcd) serveMetrics() (err error) { func (e *Etcd) serveMetrics() (err error) {
if e.cfg.Metrics == "extensive" { if e.cfg.Metrics == "extensive" {
grpc_prometheus.EnableHandlingTimeHistogram() grpc_prometheus.EnableHandlingTimeHistogram()

View File

@@ -19,14 +19,12 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
defaultLog "log" defaultLog "log"
"math"
"net" "net"
"net/http" "net/http"
"strings" "strings"
etcdservergw "go.etcd.io/etcd/api/v3/etcdserverpb/gw" etcdservergw "go.etcd.io/etcd/api/v3/etcdserverpb/gw"
"go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/v3/credentials"
"go.etcd.io/etcd/pkg/v3/debugutil" "go.etcd.io/etcd/pkg/v3/debugutil"
"go.etcd.io/etcd/pkg/v3/httputil" "go.etcd.io/etcd/pkg/v3/httputil"
"go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/config"
@@ -50,12 +48,15 @@ import (
) )
type serveCtx struct { type serveCtx struct {
lg *zap.Logger lg *zap.Logger
l net.Listener l net.Listener
scheme string
addr string addr string
network string network string
secure bool secure bool
insecure bool insecure bool
httpOnly bool
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
@@ -93,6 +94,8 @@ func (sctx *serveCtx) serve(
tlsinfo *transport.TLSInfo, tlsinfo *transport.TLSInfo,
handler http.Handler, handler http.Handler,
errHandler func(error), errHandler func(error),
grpcDialForRestGatewayBackends func(ctx context.Context) (*grpc.ClientConn, error),
splitHttp bool,
gopts ...grpc.ServerOption) (err error) { gopts ...grpc.ServerOption) (err error) {
logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
<-s.ReadyNotify() <-s.ReadyNotify()
@@ -100,115 +103,166 @@ func (sctx *serveCtx) serve(
sctx.lg.Info("ready to serve client requests") sctx.lg.Info("ready to serve client requests")
m := cmux.New(sctx.l) m := cmux.New(sctx.l)
var server func() error
onlyGRPC := splitHttp && !sctx.httpOnly
onlyHttp := splitHttp && sctx.httpOnly
grpcEnabled := !onlyHttp
httpEnabled := !onlyGRPC
v3c := v3client.New(s) v3c := v3client.New(s)
servElection := v3election.NewElectionServer(v3c) servElection := v3election.NewElectionServer(v3c)
servLock := v3lock.NewLockServer(v3c) servLock := v3lock.NewLockServer(v3c)
var gs *grpc.Server // Make sure serversC is closed even if we prematurely exit the function.
defer func() { defer close(sctx.serversC)
if err != nil && gs != nil { var gwmux *gw.ServeMux
gs.Stop() if s.Cfg.EnableGRPCGateway {
// GRPC gateway connects to grpc server via connection provided by grpc dial.
gwmux, err = sctx.registerGateway(grpcDialForRestGatewayBackends)
if err != nil {
sctx.lg.Error("registerGateway failed", zap.Error(err))
return err
} }
}() }
var traffic string
switch {
case onlyGRPC:
traffic = "grpc"
case onlyHttp:
traffic = "http"
default:
traffic = "grpc+http"
}
if sctx.insecure { if sctx.insecure {
gs = v3rpc.Server(s, nil, nil, gopts...) var gs *grpc.Server
v3electionpb.RegisterElectionServer(gs, servElection) var srv *http.Server
v3lockpb.RegisterLockServer(gs, servLock) if httpEnabled {
if sctx.serviceRegister != nil { httpmux := sctx.createMux(gwmux, handler)
sctx.serviceRegister(gs) srv = &http.Server{
} Handler: createAccessController(sctx.lg, s, httpmux),
grpcl := m.Match(cmux.HTTP2()) ErrorLog: logger, // do not log user error
go func() { errHandler(gs.Serve(grpcl)) }() }
if err := configureHttpServer(srv, s.Cfg); err != nil {
var gwmux *gw.ServeMux sctx.lg.Error("Configure http server failed", zap.Error(err))
if s.Cfg.EnableGRPCGateway {
gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
if err != nil {
return err return err
} }
} }
if grpcEnabled {
httpmux := sctx.createMux(gwmux, handler) gs = v3rpc.Server(s, nil, nil, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection)
srvhttp := &http.Server{ v3lockpb.RegisterLockServer(gs, servLock)
Handler: createAccessController(sctx.lg, s, httpmux), if sctx.serviceRegister != nil {
ErrorLog: logger, // do not log user error sctx.serviceRegister(gs)
}
defer func(gs *grpc.Server) {
if err != nil {
sctx.lg.Warn("stopping insecure grpc server due to error", zap.Error(err))
gs.Stop()
sctx.lg.Warn("stopped insecure grpc server due to error", zap.Error(err))
}
}(gs)
} }
if err := configureHttpServer(srvhttp, s.Cfg); err != nil { if onlyGRPC {
sctx.lg.Error("Configure http server failed", zap.Error(err)) server = func() error {
return err return gs.Serve(sctx.l)
} }
httpl := m.Match(cmux.HTTP1()) } else {
go func() { errHandler(srvhttp.Serve(httpl)) }() server = m.Serve
sctx.serversC <- &servers{grpc: gs, http: srvhttp} httpl := m.Match(cmux.HTTP1())
go func(srvhttp *http.Server, tlsLis net.Listener) {
errHandler(srvhttp.Serve(tlsLis))
}(srv, httpl)
if grpcEnabled {
grpcl := m.Match(cmux.HTTP2())
go func(gs *grpc.Server, l net.Listener) {
errHandler(gs.Serve(l))
}(gs, grpcl)
}
}
sctx.serversC <- &servers{grpc: gs, http: srv}
sctx.lg.Info( sctx.lg.Info(
"serving client traffic insecurely; this is strongly discouraged!", "serving client traffic insecurely; this is strongly discouraged!",
zap.String("traffic", traffic),
zap.String("address", sctx.l.Addr().String()), zap.String("address", sctx.l.Addr().String()),
) )
} }
if sctx.secure { if sctx.secure {
var gs *grpc.Server
var srv *http.Server
tlscfg, tlsErr := tlsinfo.ServerConfig() tlscfg, tlsErr := tlsinfo.ServerConfig()
if tlsErr != nil { if tlsErr != nil {
return tlsErr return tlsErr
} }
gs = v3rpc.Server(s, tlscfg, nil, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {
sctx.serviceRegister(gs)
}
handler = grpcHandlerFunc(gs, handler)
var gwmux *gw.ServeMux if grpcEnabled {
if s.Cfg.EnableGRPCGateway { gs = v3rpc.Server(s, tlscfg, nil, gopts...)
dtls := tlscfg.Clone() v3electionpb.RegisterElectionServer(gs, servElection)
// trust local server v3lockpb.RegisterLockServer(gs, servLock)
dtls.InsecureSkipVerify = true if sctx.serviceRegister != nil {
bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls}) sctx.serviceRegister(gs)
opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())} }
gwmux, err = sctx.registerGateway(opts) defer func(gs *grpc.Server) {
if err != nil { if err != nil {
sctx.lg.Warn("stopping secure grpc server due to error", zap.Error(err))
gs.Stop()
sctx.lg.Warn("stopped secure grpc server due to error", zap.Error(err))
}
}(gs)
}
if httpEnabled {
if grpcEnabled {
handler = grpcHandlerFunc(gs, handler)
}
httpmux := sctx.createMux(gwmux, handler)
srv = &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux),
TLSConfig: tlscfg,
ErrorLog: logger, // do not log user error
}
if err := configureHttpServer(srv, s.Cfg); err != nil {
sctx.lg.Error("Configure https server failed", zap.Error(err))
return err return err
} }
} }
var tlsl net.Listener if onlyGRPC {
tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo) server = func() error { return gs.Serve(sctx.l) }
if err != nil { } else {
return err server = m.Serve
}
// TODO: add debug flag; enable logging when debug flag is set
httpmux := sctx.createMux(gwmux, handler)
srv := &http.Server{ tlsl, err := transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
Handler: createAccessController(sctx.lg, s, httpmux), if err != nil {
TLSConfig: tlscfg, return err
ErrorLog: logger, // do not log user error }
go func(srvhttp *http.Server, tlsl net.Listener) {
errHandler(srvhttp.Serve(tlsl))
}(srv, tlsl)
} }
if err := configureHttpServer(srv, s.Cfg); err != nil {
sctx.lg.Error("Configure https server failed", zap.Error(err))
return err
}
go func() { errHandler(srv.Serve(tlsl)) }()
sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
sctx.lg.Info( sctx.lg.Info(
"serving client traffic securely", "serving client traffic securely",
zap.String("traffic", traffic),
zap.String("address", sctx.l.Addr().String()), zap.String("address", sctx.l.Addr().String()),
) )
} }
close(sctx.serversC) return server()
return m.Serve()
} }
func configureHttpServer(srv *http.Server, cfg config.ServerConfig) error { func configureHttpServer(srv *http.Server, cfg config.ServerConfig) error {
// todo (ahrtr): should we support configuring other parameters in the future as well? // todo (ahrtr): should we support configuring other parameters in the future as well?
return http2.ConfigureServer(srv, &http2.Server{ return http2.ConfigureServer(srv, &http2.Server{
MaxConcurrentStreams: cfg.MaxConcurrentStreams, MaxConcurrentStreams: cfg.MaxConcurrentStreams,
// Override to avoid using priority scheduler which is affected by https://github.com/golang/go/issues/58804.
NewWriteScheduler: http2.NewRandomWriteScheduler,
}) })
} }
@@ -231,20 +285,10 @@ func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Ha
type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.ClientConn, error)) (*gw.ServeMux, error) {
ctx := sctx.ctx ctx := sctx.ctx
addr := sctx.addr conn, err := dial(ctx)
if network := sctx.network; network == "unix" {
// explicitly define unix network for gRPC socket support
addr = fmt.Sprintf("%s://%s", network, addr)
}
opts = append(opts, grpc.WithDefaultCallOptions([]grpc.CallOption{
grpc.MaxCallRecvMsgSize(math.MaxInt32),
}...))
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -279,6 +323,18 @@ func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, err
return gwmux, nil return gwmux, nil
} }
type wsProxyZapLogger struct {
*zap.Logger
}
func (w wsProxyZapLogger) Warnln(i ...interface{}) {
w.Warn(fmt.Sprint(i...))
}
func (w wsProxyZapLogger) Debugln(i ...interface{}) {
w.Debug(fmt.Sprint(i...))
}
func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
httpmux := http.NewServeMux() httpmux := http.NewServeMux()
for path, h := range sctx.userHandlers { for path, h := range sctx.userHandlers {
@@ -298,6 +354,7 @@ func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.
}, },
), ),
wsproxy.WithMaxRespBodyBufferSize(0x7fffffff), wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
wsproxy.WithLogger(wsProxyZapLogger{sctx.lg}),
), ),
) )
} }

View File

@@ -41,5 +41,4 @@
// if err != nil { // if err != nil {
// // handle error! // // handle error!
// } // }
//
package v3client package v3client

View File

@@ -144,6 +144,10 @@ type serverWatchStream struct {
// records fragmented watch IDs // records fragmented watch IDs
fragment map[mvcc.WatchID]bool fragment map[mvcc.WatchID]bool
// indicates whether we have an outstanding global progress
// notification to send
deferredProgress bool
// closec indicates the stream is closed. // closec indicates the stream is closed.
closec chan struct{} closec chan struct{}
@@ -173,6 +177,8 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
prevKV: make(map[mvcc.WatchID]bool), prevKV: make(map[mvcc.WatchID]bool),
fragment: make(map[mvcc.WatchID]bool), fragment: make(map[mvcc.WatchID]bool),
deferredProgress: false,
closec: make(chan struct{}), closec: make(chan struct{}),
} }
@@ -359,10 +365,16 @@ func (sws *serverWatchStream) recvLoop() error {
} }
case *pb.WatchRequest_ProgressRequest: case *pb.WatchRequest_ProgressRequest:
if uv.ProgressRequest != nil { if uv.ProgressRequest != nil {
sws.ctrlStream <- &pb.WatchResponse{ sws.mu.Lock()
Header: sws.newResponseHeader(sws.watchStream.Rev()), // Ignore if deferred progress notification is already in progress
WatchId: clientv3.InvalidWatchID, // response is not associated with any WatchId and will be broadcast to all watch channels if !sws.deferredProgress {
// Request progress for all watchers,
// force generation of a response
if !sws.watchStream.RequestProgressAll() {
sws.deferredProgress = true
}
} }
sws.mu.Unlock()
} }
default: default:
// we probably should not shutdown the entire stream when // we probably should not shutdown the entire stream when
@@ -430,11 +442,15 @@ func (sws *serverWatchStream) sendLoop() {
Canceled: canceled, Canceled: canceled,
} }
if _, okID := ids[wresp.WatchID]; !okID { // Progress notifications can have WatchID -1
// buffer if id not yet announced // if they announce on behalf of multiple watchers
wrs := append(pending[wresp.WatchID], wr) if wresp.WatchID != clientv3.InvalidWatchID {
pending[wresp.WatchID] = wrs if _, okID := ids[wresp.WatchID]; !okID {
continue // buffer if id not yet announced
wrs := append(pending[wresp.WatchID], wr)
pending[wresp.WatchID] = wrs
continue
}
} }
mvcc.ReportEventReceived(len(evs)) mvcc.ReportEventReceived(len(evs))
@@ -465,6 +481,11 @@ func (sws *serverWatchStream) sendLoop() {
// elide next progress update if sent a key update // elide next progress update if sent a key update
sws.progress[wresp.WatchID] = false sws.progress[wresp.WatchID] = false
} }
if sws.deferredProgress {
if sws.watchStream.RequestProgressAll() {
sws.deferredProgress = false
}
}
sws.mu.Unlock() sws.mu.Unlock()
case c, ok := <-sws.ctrlStream: case c, ok := <-sws.ctrlStream:
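The deferredProgress flag added in the two hunks above implements a small park-and-retry handshake: recvLoop parks a progress request that cannot be answered while any watcher is unsynced, and sendLoop retries it after each delivered watch response. A self-contained toy sketch of that pattern (not etcd's real types):

package main

import "fmt"

// toyStream models the deferred-progress handshake between recvLoop
// (client asks for progress) and sendLoop (server emits responses).
type toyStream struct {
    allSynced        bool
    deferredProgress bool
}

// requestProgressAll reports whether a progress notification could be
// emitted right away; it only succeeds when every watcher is synced.
func (s *toyStream) requestProgressAll() bool {
    if s.allSynced {
        fmt.Println("progress notification sent (WatchID -1)")
        return true
    }
    return false
}

// onProgressRequest is the recvLoop side: park the request if it
// cannot be answered immediately.
func (s *toyStream) onProgressRequest() {
    if !s.deferredProgress && !s.requestProgressAll() {
        s.deferredProgress = true
        fmt.Println("watchers not yet synced; deferring progress notification")
    }
}

// onWatchResponse is the sendLoop side: retry the parked request after
// each delivered watch response.
func (s *toyStream) onWatchResponse() {
    if s.deferredProgress && s.requestProgressAll() {
        s.deferredProgress = false
    }
}

func main() {
    s := &toyStream{}
    s.onProgressRequest() // deferred: an unsynced watcher exists
    s.allSynced = true    // the slow watcher catches up
    s.onWatchResponse()   // deferred request is now answered
}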

View File

@@ -340,8 +340,9 @@ func (r *raftNode) start(rh *raftReadyHandler) {
// the applying workflow. But when the client receives the response, // the applying workflow. But when the client receives the response,
// it doesn't mean etcd has already successfully saved the data, // it doesn't mean etcd has already successfully saved the data,
// including BoltDB and WAL, because: // including BoltDB and WAL, because:
// 1. etcd commits the boltDB transaction periodically instead of on each request; // 1. etcd commits the boltDB transaction periodically instead of on each request;
// 2. etcd saves WAL entries in parallel with applying the committed entries. // 2. etcd saves WAL entries in parallel with applying the committed entries.
//
// Accordingly, it might run into a situation of data loss when the etcd crashes // Accordingly, it might run into a situation of data loss when the etcd crashes
// immediately after responding to the client and before the boltDB and WAL // immediately after responding to the client and before the boltDB and WAL
// successfully save the data to disk. // successfully save the data to disk.

View File

@@ -454,6 +454,13 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest
lg := s.Logger() lg := s.Logger()
// fix https://nvd.nist.gov/vuln/detail/CVE-2021-28235
defer func() {
if r != nil {
r.Password = ""
}
}()
var resp proto.Message var resp proto.Message
for { for {
checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)

View File

@@ -39,9 +39,10 @@ var (
// key: "foo" // key: "foo"
// rev: 5 // rev: 5
// generations: // generations:
// {empty} //
// {4.0, 5.0(t)} // {empty}
// {1.0, 2.0, 3.0(t)} // {4.0, 5.0(t)}
// {1.0, 2.0, 3.0(t)}
// //
// Compact a keyIndex removes the versions with smaller or equal to // Compact a keyIndex removes the versions with smaller or equal to
// rev except the largest one. If the generation becomes empty // rev except the largest one. If the generation becomes empty
@@ -51,22 +52,26 @@ var (
// For example: // For example:
// compact(2) on the previous example // compact(2) on the previous example
// generations: // generations:
// {empty} //
// {4.0, 5.0(t)} // {empty}
// {2.0, 3.0(t)} // {4.0, 5.0(t)}
// {2.0, 3.0(t)}
// //
// compact(4) // compact(4)
// generations: // generations:
// {empty} //
// {4.0, 5.0(t)} // {empty}
// {4.0, 5.0(t)}
// //
// compact(5): // compact(5):
// generations: // generations:
// {empty} -> key SHOULD be removed. //
// {empty} -> key SHOULD be removed.
// //
// compact(6): // compact(6):
// generations: // generations:
// {empty} -> key SHOULD be removed. //
// {empty} -> key SHOULD be removed.
type keyIndex struct { type keyIndex struct {
key []byte key []byte
modified revision // the main rev of the last modification modified revision // the main rev of the last modification

View File

@@ -19,6 +19,7 @@ import (
"time" "time"
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/mvcc/backend"
@@ -41,6 +42,7 @@ var (
type watchable interface { type watchable interface {
watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
progress(w *watcher) progress(w *watcher)
progressAll(watchers map[WatchID]*watcher) bool
rev() int64 rev() int64
} }
@@ -324,10 +326,10 @@ func (s *watchableStore) moveVictims() (moved int) {
} }
// syncWatchers syncs unsynced watchers by: // syncWatchers syncs unsynced watchers by:
// 1. choose a set of watchers from the unsynced watcher group // 1. choose a set of watchers from the unsynced watcher group
// 2. iterate over the set to get the minimum revision and remove compacted watchers // 2. iterate over the set to get the minimum revision and remove compacted watchers
// 3. use minimum revision to get all key-value pairs and send those events to watchers // 3. use minimum revision to get all key-value pairs and send those events to watchers
// 4. remove synced watchers in set from unsynced group and move to synced group // 4. remove synced watchers in set from unsynced group and move to synced group
func (s *watchableStore) syncWatchers() int { func (s *watchableStore) syncWatchers() int {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
@@ -447,7 +449,6 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
pendingEventsGauge.Add(float64(len(eb.evs))) pendingEventsGauge.Add(float64(len(eb.evs)))
} else { } else {
// move slow watcher to victims // move slow watcher to victims
w.minRev = rev + 1
if victim == nil { if victim == nil {
victim = make(watcherBatch) victim = make(watcherBatch)
} }
@@ -456,6 +457,10 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
s.synced.delete(w) s.synced.delete(w)
slowWatcherGauge.Inc() slowWatcherGauge.Inc()
} }
// always update minRev
// in case 'send' returns true and watcher stays synced, this is needed for Restore when all watchers become unsynced
// in case 'send' returns false, this is needed for syncWatchers
w.minRev = rev + 1
} }
s.addVictim(victim) s.addVictim(victim)
} }
@@ -474,14 +479,34 @@ func (s *watchableStore) addVictim(victim watcherBatch) {
func (s *watchableStore) rev() int64 { return s.store.Rev() } func (s *watchableStore) rev() int64 { return s.store.Rev() }
func (s *watchableStore) progress(w *watcher) { func (s *watchableStore) progress(w *watcher) {
s.progressIfSync(map[WatchID]*watcher{w.id: w}, w.id)
}
func (s *watchableStore) progressAll(watchers map[WatchID]*watcher) bool {
return s.progressIfSync(watchers, clientv3.InvalidWatchID)
}
func (s *watchableStore) progressIfSync(watchers map[WatchID]*watcher, responseWatchID WatchID) bool {
s.mu.RLock() s.mu.RLock()
defer s.mu.RUnlock() defer s.mu.RUnlock()
if _, ok := s.synced.watchers[w]; ok { // Any watcher unsynced?
w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) for _, w := range watchers {
// If the ch is full, this watcher is receiving events. if _, ok := s.synced.watchers[w]; !ok {
// We do not need to send progress at all. return false
}
} }
// If all watchers are synchronised, send out progress
// notification on first watcher. Note that all watchers
// should have the same underlying stream, and the progress
// notification will be broadcasted client-side if required
// (see dispatchEvent in client/v3/watch.go)
for _, w := range watchers {
w.send(WatchResponse{WatchID: responseWatchID, Revision: s.rev()})
return true
}
return true
} }
type watcher struct { type watcher struct {

View File

@@ -58,6 +58,13 @@ type WatchStream interface {
// of the watchers since the watcher is currently synced. // of the watchers since the watcher is currently synced.
RequestProgress(id WatchID) RequestProgress(id WatchID)
// RequestProgressAll requests a progress notification for all
// watchers sharing the stream. If all watchers are synced, a
// progress notification with watch ID -1 will be sent to an
// arbitrary watcher of this stream, and the function returns
// true.
RequestProgressAll() bool
// Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be
// returned. // returned.
Cancel(id WatchID) error Cancel(id WatchID) error
@@ -188,3 +195,9 @@ func (ws *watchStream) RequestProgress(id WatchID) {
} }
ws.watchable.progress(w) ws.watchable.progress(w)
} }
func (ws *watchStream) RequestProgressAll() bool {
ws.mu.Lock()
defer ws.mu.Unlock()
return ws.watchable.progressAll(ws.watchers)
}
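On the client side, these server changes back clientv3's Watcher.RequestProgress. A hedged end-to-end sketch (the endpoint and key are placeholders) that opens a watch and asks for a manual progress notification:

package main

import (
    "context"
    "fmt"
    "time"

    clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // Open a watch so the stream has at least one (synced) watcher.
    wch := cli.Watch(ctx, "example-key")

    // Ask the server for a progress notification on this stream.
    if err := cli.RequestProgress(ctx); err != nil {
        panic(err)
    }

    for resp := range wch {
        if resp.IsProgressNotify() {
            // Header.Revision shows how far the store has advanced even
            // though no events for "example-key" arrived.
            fmt.Println("progress notify at revision", resp.Header.Revision)
            return
        }
    }
}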

View File

@@ -70,6 +70,5 @@ snapshot to the end of the WAL are read first:
This will give you the metadata, the last raft.State and the slice of This will give you the metadata, the last raft.State and the slice of
raft.Entry items in the log. raft.Entry items in the log.
*/ */
package wal package wal

View File

@@ -939,7 +939,10 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
} }
if curOff < SegmentSizeBytes { if curOff < SegmentSizeBytes {
if mustSync { if mustSync {
return w.sync() // gofail: var walBeforeSync struct{}
err = w.sync()
// gofail: var walAfterSync struct{}
return err
} }
return nil return nil
} }

32
vendor/modules.txt vendored
View File

@@ -725,11 +725,11 @@ github.com/xiang90/probing
# github.com/xlab/treeprint v1.2.0 # github.com/xlab/treeprint v1.2.0
## explicit; go 1.13 ## explicit; go 1.13
github.com/xlab/treeprint github.com/xlab/treeprint
# go.etcd.io/bbolt v1.3.6 # go.etcd.io/bbolt v1.3.7
## explicit; go 1.12
go.etcd.io/bbolt
# go.etcd.io/etcd/api/v3 v3.5.7
## explicit; go 1.17 ## explicit; go 1.17
go.etcd.io/bbolt
# go.etcd.io/etcd/api/v3 v3.5.8
## explicit; go 1.19
go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/authpb
go.etcd.io/etcd/api/v3/etcdserverpb go.etcd.io/etcd/api/v3/etcdserverpb
go.etcd.io/etcd/api/v3/etcdserverpb/gw go.etcd.io/etcd/api/v3/etcdserverpb/gw
@@ -737,8 +737,8 @@ go.etcd.io/etcd/api/v3/membershippb
go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/mvccpb
go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/v3rpc/rpctypes
go.etcd.io/etcd/api/v3/version go.etcd.io/etcd/api/v3/version
# go.etcd.io/etcd/client/pkg/v3 v3.5.7 # go.etcd.io/etcd/client/pkg/v3 v3.5.8
## explicit; go 1.17 ## explicit; go 1.19
go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/fileutil
go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/logutil
go.etcd.io/etcd/client/pkg/v3/pathutil go.etcd.io/etcd/client/pkg/v3/pathutil
@@ -747,18 +747,18 @@ go.etcd.io/etcd/client/pkg/v3/systemd
go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/tlsutil
go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/transport
go.etcd.io/etcd/client/pkg/v3/types go.etcd.io/etcd/client/pkg/v3/types
# go.etcd.io/etcd/client/v2 v2.305.7 # go.etcd.io/etcd/client/v2 v2.305.8
## explicit; go 1.17 ## explicit; go 1.19
go.etcd.io/etcd/client/v2 go.etcd.io/etcd/client/v2
# go.etcd.io/etcd/client/v3 v3.5.7 # go.etcd.io/etcd/client/v3 v3.5.8
## explicit; go 1.17 ## explicit; go 1.19
go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3
go.etcd.io/etcd/client/v3/concurrency go.etcd.io/etcd/client/v3/concurrency
go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/credentials
go.etcd.io/etcd/client/v3/internal/endpoint go.etcd.io/etcd/client/v3/internal/endpoint
go.etcd.io/etcd/client/v3/internal/resolver go.etcd.io/etcd/client/v3/internal/resolver
# go.etcd.io/etcd/pkg/v3 v3.5.7 # go.etcd.io/etcd/pkg/v3 v3.5.8
## explicit; go 1.17 ## explicit; go 1.19
go.etcd.io/etcd/pkg/v3/adt go.etcd.io/etcd/pkg/v3/adt
go.etcd.io/etcd/pkg/v3/contention go.etcd.io/etcd/pkg/v3/contention
go.etcd.io/etcd/pkg/v3/cpuutil go.etcd.io/etcd/pkg/v3/cpuutil
@@ -774,15 +774,15 @@ go.etcd.io/etcd/pkg/v3/runtime
go.etcd.io/etcd/pkg/v3/schedule go.etcd.io/etcd/pkg/v3/schedule
go.etcd.io/etcd/pkg/v3/traceutil go.etcd.io/etcd/pkg/v3/traceutil
go.etcd.io/etcd/pkg/v3/wait go.etcd.io/etcd/pkg/v3/wait
# go.etcd.io/etcd/raft/v3 v3.5.7 # go.etcd.io/etcd/raft/v3 v3.5.8
## explicit; go 1.17 ## explicit; go 1.19
go.etcd.io/etcd/raft/v3 go.etcd.io/etcd/raft/v3
go.etcd.io/etcd/raft/v3/confchange go.etcd.io/etcd/raft/v3/confchange
go.etcd.io/etcd/raft/v3/quorum go.etcd.io/etcd/raft/v3/quorum
go.etcd.io/etcd/raft/v3/raftpb go.etcd.io/etcd/raft/v3/raftpb
go.etcd.io/etcd/raft/v3/tracker go.etcd.io/etcd/raft/v3/tracker
# go.etcd.io/etcd/server/v3 v3.5.7 # go.etcd.io/etcd/server/v3 v3.5.8
## explicit; go 1.17 ## explicit; go 1.19
go.etcd.io/etcd/server/v3/auth go.etcd.io/etcd/server/v3/auth
go.etcd.io/etcd/server/v3/config go.etcd.io/etcd/server/v3/config
go.etcd.io/etcd/server/v3/datadir go.etcd.io/etcd/server/v3/datadir