Merge pull request #4926 from dims/update-k8s-to-latest-v1.20.x

Update k8s to latest v1.20.x
Maksym Pavlenko 2021-01-12 17:45:26 -08:00 committed by GitHub
commit 124fc14a46
142 changed files with 31186 additions and 3012 deletions

go.mod (24 lines changed)

@@ -31,8 +31,8 @@ require (
 	github.com/gogo/googleapis v1.4.0
 	github.com/gogo/protobuf v1.3.1
 	github.com/golang/protobuf v1.4.3
-	github.com/google/go-cmp v0.5.1
+	github.com/google/go-cmp v0.5.2
-	github.com/google/uuid v1.1.1
+	github.com/google/uuid v1.1.2
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/hashicorp/go-multierror v1.0.0
 	github.com/imdario/mergo v0.3.10
@@ -47,24 +47,24 @@
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.7.1
 	github.com/sirupsen/logrus v1.7.0
-	github.com/stretchr/testify v1.4.0
+	github.com/stretchr/testify v1.6.1
 	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
 	github.com/tchap/go-patricia v2.2.6+incompatible
 	github.com/urfave/cli v1.22.2
 	go.etcd.io/bbolt v1.3.5
-	golang.org/x/net v0.0.0-20200707034311-ab3426394381
+	golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
 	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
 	golang.org/x/sys v0.0.0-20201202213521-69691e467435
 	google.golang.org/grpc v1.30.0
 	gotest.tools/v3 v3.0.2
-	k8s.io/api v0.19.4
+	k8s.io/api v0.20.1
-	k8s.io/apimachinery v0.19.4
+	k8s.io/apimachinery v0.20.1
-	k8s.io/apiserver v0.19.4
+	k8s.io/apiserver v0.20.1
-	k8s.io/client-go v0.19.4
+	k8s.io/client-go v0.20.1
-	k8s.io/component-base v0.19.4
+	k8s.io/component-base v0.20.1
-	k8s.io/cri-api v0.20.0-beta.1.0.20201105173512-3990421b69a0
+	k8s.io/cri-api v0.20.1
-	k8s.io/klog/v2 v2.2.0
+	k8s.io/klog/v2 v2.4.0
-	k8s.io/utils v0.0.0-20200729134348-d5654de09c73
+	k8s.io/utils v0.0.0-20201110183641-67b214c5f920
 )

 replace (
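The require-block bumps above are the substance of the change; go.sum and the vendored sources are regenerated from them (presumably most of the 142 changed files are the refreshed vendor/ tree). As a rough sketch of how this kind of update is typically produced with the standard Go module tooling (the exact commands used for this PR are not recorded here; the module list below simply mirrors the diff above):

# Bump the k8s.io modules listed in the require block to the v1.20.x-era releases.
go get k8s.io/api@v0.20.1 k8s.io/apimachinery@v0.20.1 k8s.io/apiserver@v0.20.1 \
    k8s.io/client-go@v0.20.1 k8s.io/component-base@v0.20.1 k8s.io/cri-api@v0.20.1 \
    k8s.io/klog/v2@v2.4.0 k8s.io/utils@v0.0.0-20201110183641-67b214c5f920

# Recompute go.sum (dropping stale entries) and refresh the vendored sources.
go mod tidy
go mod vendor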

go.sum (172 lines changed)

@@ -5,26 +5,34 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -43,9 +51,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da h1:sqPBuX6WumurdHaSRbS8xyyyP8Rf7kUpLUlJaN7rztw= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da h1:sqPBuX6WumurdHaSRbS8xyyyP8Rf7kUpLUlJaN7rztw=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -53,6 +59,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -62,7 +69,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
@@ -198,6 +205,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -206,7 +214,9 @@ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXt
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -215,15 +225,11 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -247,9 +253,13 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -258,8 +268,11 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -267,10 +280,14 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@@ -278,7 +295,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
@@ -328,7 +345,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
@@ -436,6 +452,8 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
@@ -476,6 +494,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -502,11 +522,13 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -520,26 +542,37 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -555,18 +588,28 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -592,10 +635,12 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -605,34 +650,43 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201202213521-69691e467435 h1:25AvDqqB9PrNqj1FLf2/70I4W0L19qqoaFq3gjNwbKk= golang.org/x/sys v0.0.0-20201202213521-69691e467435 h1:25AvDqqB9PrNqj1FLf2/70I4W0L19qqoaFq3gjNwbKk=
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -649,18 +703,38 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
@ -671,8 +745,7 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -702,6 +775,8 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
@ -711,31 +786,34 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= k8s.io/api v0.20.1 h1:ud1c3W3YNzGd6ABJlbFfKXBKXO+1KdGfcgGGNgFR03E=
k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ=
k8s.io/apiserver v0.19.4 h1:X40UuyVt6DcYWIh2olcePkyKO0LRJFvxWC0kLxYvkZU= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apiserver v0.19.4/go.mod h1:X8WRHCR1UGZDd7HpV0QDc1h/6VbbpAeAGyxSh8yzZXw= k8s.io/apiserver v0.20.1 h1:yEqdkxlnQbxi/3e74cp0X16h140fpvPrNnNRAJBDuBk=
k8s.io/client-go v0.19.4 h1:85D3mDNoLF+xqpyE9Dh/OtrJDyJrSRKkHmDXIbEzer8= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA= k8s.io/client-go v0.20.1 h1:Qquik0xNFbK9aUG92pxHYsyfea5/RPO9o9bSywNor+M=
k8s.io/component-base v0.19.4 h1:HobPRToQ8KJ9ubRju6PUAk9I5V1GNMJZ4PyWbiWA0uI= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/component-base v0.19.4/go.mod h1:ZzuSLlsWhajIDEkKF73j64Gz/5o0AgON08FgRbEPI70= k8s.io/component-base v0.20.1 h1:6OQaHr205NSl24t5wOF2IhdrlxZTWEZwuGlLvBgaeIg=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.0-beta.1.0.20201105173512-3990421b69a0 h1:/AO/xlTAHFP+ZLhfYMjSUzMsZe9of1fwh1TupXAKzKE= k8s.io/cri-api v0.20.1 h1:b4l7SZ9+VPfIrrJnMXzm0HR9wAsHwHh9+QcmK31nQMI=
k8s.io/cri-api v0.20.0-beta.1.0.20201105173512-3990421b69a0/go.mod h1:UN/iU9Ua0iYdDREBXNE9vqCJ7MIh/FW3VIL0d8pw7Fw= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
@ -95,12 +95,12 @@ func Equal(x, y interface{}, opts ...Option) bool {
return s.result.Equal() return s.result.Equal()
} }
// Diff returns a human-readable report of the differences between two values. // Diff returns a human-readable report of the differences between two values:
// It returns an empty string if and only if Equal returns true for the same // y - x. It returns an empty string if and only if Equal returns true for the
// input values and options. // same input values and options.
// //
// The output is displayed as a literal in pseudo-Go syntax. // The output is displayed as a literal in pseudo-Go syntax.
// At the start of each line, a "-" prefix indicates an element removed from x, // At the start of each line, a "-" prefix indicates an element removed from y,
// a "+" prefix to indicates an element added to y, and the lack of a prefix // a "+" prefix to indicates an element added to y, and the lack of a prefix
// indicates an element common to both x and y. If possible, the output // indicates an element common to both x and y. If possible, the output
// uses fmt.Stringer.String or error.Error methods to produce more humanly // uses fmt.Stringer.String or error.Error methods to produce more humanly
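Reviewer note: the reworded Diff contract above (empty report exactly when Equal holds, differing elements marked with "-"/"+" prefixes in pseudo-Go output) is easiest to see in a tiny program. This is only an illustrative sketch, not part of the vendored change.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := map[string]int{"port": 80, "timeout": 30}
	y := map[string]int{"port": 80, "timeout": 60}
	// Equal reports false here, so Diff returns a non-empty report
	// with prefixed lines for the differing "timeout" entry.
	fmt.Println(cmp.Equal(x, y))
	fmt.Println(cmp.Diff(x, y))
}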
@ -225,11 +225,14 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
// Unable to Interface implies unexported field without visibility access. // Unable to Interface implies unexported field without visibility access.
if !vx.CanInterface() || !vy.CanInterface() { if !vx.CanInterface() || !vy.CanInterface() {
const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
var name string var name string
if t := s.curPath.Index(-2).Type(); t.Name() != "" { if t := s.curPath.Index(-2).Type(); t.Name() != "" {
// Named type with unexported fields. // Named type with unexported fields.
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values"
}
} else { } else {
// Unnamed type with unexported fields. Derive PkgPath from field. // Unnamed type with unexported fields. Derive PkgPath from field.
var pkgPath string var pkgPath string
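Reviewer note: the new help text points callers at cmpopts.EquateErrors when the unexported-field type is an error. A minimal sketch of that option; the response struct and the wrapped os.ErrNotExist are invented for illustration.

package main

import (
	"fmt"
	"os"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type response struct {
	Body string
	Err  error
}

func main() {
	got := response{Err: fmt.Errorf("load config: %w", os.ErrNotExist)}
	want := response{Err: os.ErrNotExist}
	// EquateErrors treats the two Err values as equal because
	// errors.Is(got.Err, want.Err) holds for the wrapped error.
	fmt.Println(cmp.Equal(want, got, cmpopts.EquateErrors()))
}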
@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID).
Full `go doc` style documentation for the package can be viewed online without Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here: installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid http://pkg.go.dev/github.com/google/uuid
@ -16,11 +16,12 @@ func (uuid UUID) MarshalText() ([]byte, error) {
// UnmarshalText implements encoding.TextUnmarshaler. // UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error { func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data) id, err := ParseBytes(data)
if err == nil { if err != nil {
*uuid = id
}
return err return err
} }
*uuid = id
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler. // MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) { func (uuid UUID) MarshalBinary() ([]byte, error) {
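Reviewer note: UnmarshalText is what encoding/json calls for string-valued uuid.UUID fields, so the early-return restructuring above is exercised by ordinary JSON decoding. A small sketch; the payload shape and UUID literal are made up.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	var payload struct {
		ID uuid.UUID `json:"id"`
	}
	// json.Unmarshal delegates the string value to UUID.UnmarshalText.
	data := []byte(`{"id":"f47ac10b-58cc-0372-8567-0e02b2c3d479"}`)
	if err := json.Unmarshal(data, &payload); err != nil {
		panic(err)
	}
	fmt.Println(payload.ID)
}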
@ -17,12 +17,6 @@ import (
// //
// In most cases, New should be used. // In most cases, New should be used.
func NewUUID() (UUID, error) { func NewUUID() (UUID, error) {
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nodeMu.Unlock()
var uuid UUID var uuid UUID
now, seq, err := GetTime() now, seq, err := GetTime()
if err != nil { if err != nil {
@ -38,7 +32,13 @@ func NewUUID() (UUID, error) {
binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq) binary.BigEndian.PutUint16(uuid[8:], seq)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:]) copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil return uuid, nil
} }
@ -27,8 +27,13 @@ func New() UUID {
// equivalent to the odds of creating a few tens of trillions of UUIDs in a // equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate. // year and having one duplicate.
func NewRandom() (UUID, error) { func NewRandom() (UUID, error) {
return NewRandomFromReader(rander)
}
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
var uuid UUID var uuid UUID
_, err := io.ReadFull(rander, uuid[:]) _, err := io.ReadFull(r, uuid[:])
if err != nil { if err != nil {
return Nil, err return Nil, err
} }
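Reviewer note: NewRandomFromReader is the new entry point added above; feeding it a fixed reader yields a reproducible version-4 UUID, which is mostly useful in tests. The 16-byte seed below is arbitrary and only for illustration.

package main

import (
	"bytes"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Any io.Reader that yields at least 16 bytes works; a fixed
	// buffer makes the result deterministic.
	seed := bytes.NewReader([]byte("0123456789abcdef"))
	id, err := uuid.NewRandomFromReader(seed)
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}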
@ -0,0 +1,3 @@
## Prometheus Community Code of Conduct
Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
@ -407,6 +407,50 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
return cpuinfo, nil return cpuinfo, nil
} }
func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
case "hart":
cpuinfo[i].CoreID = field[1]
case "isa":
cpuinfo[i].ModelName = field[1]
}
}
return cpuinfo, nil
}
func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
return nil, errors.New("not implemented")
}
// firstNonEmptyLine advances the scanner to the first non-empty line // firstNonEmptyLine advances the scanner to the first non-empty line
// and returns the contents of that line // and returns the contents of that line
func firstNonEmptyLine(scanner *bufio.Scanner) string { func firstNonEmptyLine(scanner *bufio.Scanner) string {
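Reviewer note: parseCPUInfoRISCV feeds the same public accessor as the other architectures. A sketch of how callers typically reach it, assuming the usual procfs entry points (NewFS, FS.CPUInfo) and the CPUInfo fields shown in the diff.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	cpus, err := fs.CPUInfo()
	if err != nil {
		panic(err)
	}
	for _, cpu := range cpus {
		// On riscv64 the new parser fills ModelName from the "isa" line
		// and CoreID from "hart".
		fmt.Println(cpu.Processor, cpu.CoreID, cpu.ModelName)
	}
}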
@ -12,6 +12,7 @@
// limitations under the License. // limitations under the License.
// +build linux // +build linux
// +build arm arm64
package procfs package procfs
@ -1,18 +0,0 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
var parseCPUInfo = parseCPUInfoMips
@ -1,18 +0,0 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
var parseCPUInfo = parseCPUInfoMips
@ -1,18 +0,0 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
var parseCPUInfo = parseCPUInfoMips
@ -12,6 +12,7 @@
// limitations under the License. // limitations under the License.
// +build linux // +build linux
// +build mips mipsle mips64 mips64le
package procfs package procfs
@ -12,8 +12,8 @@
// limitations under the License. // limitations under the License.
// +build linux // +build linux
// +build arm64 // +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs package procfs
var parseCPUInfo = parseCPUInfoARM var parseCPUInfo = parseCPUInfoDummy
@ -1,18 +0,0 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
var parseCPUInfo = parseCPUInfoPPC
@ -12,6 +12,7 @@
// limitations under the License. // limitations under the License.
// +build linux // +build linux
// +build ppc64 ppc64le
package procfs package procfs
@ -467,7 +467,7 @@ Pid: 26231
PPid: 1 PPid: 1
TracerPid: 0 TracerPid: 0
Uid: 1000 1000 1000 0 Uid: 1000 1000 1000 0
Gid: 0 0 0 0 Gid: 1001 1001 1001 0
FDSize: 128 FDSize: 128
Groups: Groups:
NStgid: 1 NStgid: 1
@ -1966,7 +1966,7 @@ Lines: 1
Mode: 444 Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/mdstat Path: fixtures/proc/mdstat
Lines: 56 Lines: 60
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S)
@ -1989,6 +1989,10 @@ md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S)
195310144 blocks [2/2] [UU] 195310144 blocks [2/2] [UU]
[=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
md201 : active raid1 sda3[0] sdb3[1]
1993728 blocks super 1.2 [2/2] [UU]
[=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec
md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F)
7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
bitmap: 0/30 pages [0KB], 65536KB chunk bitmap: 0/30 pages [0KB], 65536KB chunk
@ -3754,6 +3758,73 @@ Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent
Lines: 0 Lines: 0
Mode: 644 Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/class/powercap/intel-rapl:a
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw
Lines: 1
95000000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name
Lines: 1
long_term
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw
Lines: 1
4090000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us
Lines: 1
999424
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw
Lines: 1
0
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name
Lines: 1
short_term
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw
Lines: 1
4090000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us
Lines: 1
2440
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/enabled
Lines: 1
1
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj
Lines: 1
240422366267
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj
Lines: 1
262143328850
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/name
Lines: 1
package-10
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/powercap/intel-rapl:a/uevent
Lines: 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/class/thermal Directory: fixtures/sys/class/thermal
Mode: 775 Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@ -25,7 +25,7 @@ import (
type KernelRandom struct { type KernelRandom struct {
// EntropyAvaliable gives the available entropy, in bits. // EntropyAvaliable gives the available entropy, in bits.
EntropyAvaliable *uint64 EntropyAvaliable *uint64
// PoolSize gives the size of the entropy pool, in bytes. // PoolSize gives the size of the entropy pool, in bits.
PoolSize *uint64 PoolSize *uint64
// URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
URandomMinReseedSeconds *uint64 URandomMinReseedSeconds *uint64
@ -107,11 +107,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
syncedBlocks := size syncedBlocks := size
recovering := strings.Contains(lines[syncLineIdx], "recovery") recovering := strings.Contains(lines[syncLineIdx], "recovery")
resyncing := strings.Contains(lines[syncLineIdx], "resync") resyncing := strings.Contains(lines[syncLineIdx], "resync")
checking := strings.Contains(lines[syncLineIdx], "check")
// Append recovery and resyncing state info. // Append recovery and resyncing state info.
if recovering || resyncing { if recovering || resyncing || checking {
if recovering { if recovering {
state = "recovering" state = "recovering"
} else if checking {
state = "checking"
} else { } else {
state = "resyncing" state = "resyncing"
} }
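Reviewer note: with the change above, arrays running a scrub are reported with a distinct state. A sketch of reading it through the public accessor, assuming the MDStat function and ActivityState field as exposed by this procfs version.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	mdstats, err := fs.MDStat()
	if err != nil {
		panic(err)
	}
	for _, md := range mdstats {
		// ActivityState can now also be "checking", alongside
		// "recovering", "resyncing", and "active".
		fmt.Println(md.Name, md.ActivityState)
	}
}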
@ -72,8 +72,10 @@ type ProcStatus struct {
// Number of involuntary context switches. // Number of involuntary context switches.
NonVoluntaryCtxtSwitches uint64 NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs)) // UIDs of the process (Real, effective, saved set, and filesystem UIDs)
UIDs [4]string UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
} }
// NewStatus returns the current status information of the process. // NewStatus returns the current status information of the process.
@ -119,6 +121,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.Name = vString s.Name = vString
case "Uid": case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t")) copy(s.UIDs[:], strings.Split(vString, "\t"))
case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t"))
case "VmPeak": case "VmPeak":
s.VmPeak = vUintBytes s.VmPeak = vUintBytes
case "VmSize": case "VmSize":
@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
@ -5,20 +5,28 @@ import (
"reflect" "reflect"
) )
func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { type CompareType int
const (
compareLess CompareType = iota - 1
compareEqual
compareGreater
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
switch kind { switch kind {
case reflect.Int: case reflect.Int:
{ {
intobj1 := obj1.(int) intobj1 := obj1.(int)
intobj2 := obj2.(int) intobj2 := obj2.(int)
if intobj1 > intobj2 { if intobj1 > intobj2 {
return -1, true return compareGreater, true
} }
if intobj1 == intobj2 { if intobj1 == intobj2 {
return 0, true return compareEqual, true
} }
if intobj1 < intobj2 { if intobj1 < intobj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Int8: case reflect.Int8:
@ -26,13 +34,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int8obj1 := obj1.(int8) int8obj1 := obj1.(int8)
int8obj2 := obj2.(int8) int8obj2 := obj2.(int8)
if int8obj1 > int8obj2 { if int8obj1 > int8obj2 {
return -1, true return compareGreater, true
} }
if int8obj1 == int8obj2 { if int8obj1 == int8obj2 {
return 0, true return compareEqual, true
} }
if int8obj1 < int8obj2 { if int8obj1 < int8obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Int16: case reflect.Int16:
@ -40,13 +48,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int16obj1 := obj1.(int16) int16obj1 := obj1.(int16)
int16obj2 := obj2.(int16) int16obj2 := obj2.(int16)
if int16obj1 > int16obj2 { if int16obj1 > int16obj2 {
return -1, true return compareGreater, true
} }
if int16obj1 == int16obj2 { if int16obj1 == int16obj2 {
return 0, true return compareEqual, true
} }
if int16obj1 < int16obj2 { if int16obj1 < int16obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Int32: case reflect.Int32:
@ -54,13 +62,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int32obj1 := obj1.(int32) int32obj1 := obj1.(int32)
int32obj2 := obj2.(int32) int32obj2 := obj2.(int32)
if int32obj1 > int32obj2 { if int32obj1 > int32obj2 {
return -1, true return compareGreater, true
} }
if int32obj1 == int32obj2 { if int32obj1 == int32obj2 {
return 0, true return compareEqual, true
} }
if int32obj1 < int32obj2 { if int32obj1 < int32obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Int64: case reflect.Int64:
@ -68,13 +76,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int64obj1 := obj1.(int64) int64obj1 := obj1.(int64)
int64obj2 := obj2.(int64) int64obj2 := obj2.(int64)
if int64obj1 > int64obj2 { if int64obj1 > int64obj2 {
return -1, true return compareGreater, true
} }
if int64obj1 == int64obj2 { if int64obj1 == int64obj2 {
return 0, true return compareEqual, true
} }
if int64obj1 < int64obj2 { if int64obj1 < int64obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Uint: case reflect.Uint:
@ -82,13 +90,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uintobj1 := obj1.(uint) uintobj1 := obj1.(uint)
uintobj2 := obj2.(uint) uintobj2 := obj2.(uint)
if uintobj1 > uintobj2 { if uintobj1 > uintobj2 {
return -1, true return compareGreater, true
} }
if uintobj1 == uintobj2 { if uintobj1 == uintobj2 {
return 0, true return compareEqual, true
} }
if uintobj1 < uintobj2 { if uintobj1 < uintobj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Uint8: case reflect.Uint8:
@ -96,13 +104,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint8obj1 := obj1.(uint8) uint8obj1 := obj1.(uint8)
uint8obj2 := obj2.(uint8) uint8obj2 := obj2.(uint8)
if uint8obj1 > uint8obj2 { if uint8obj1 > uint8obj2 {
return -1, true return compareGreater, true
} }
if uint8obj1 == uint8obj2 { if uint8obj1 == uint8obj2 {
return 0, true return compareEqual, true
} }
if uint8obj1 < uint8obj2 { if uint8obj1 < uint8obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Uint16: case reflect.Uint16:
@ -110,13 +118,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint16obj1 := obj1.(uint16) uint16obj1 := obj1.(uint16)
uint16obj2 := obj2.(uint16) uint16obj2 := obj2.(uint16)
if uint16obj1 > uint16obj2 { if uint16obj1 > uint16obj2 {
return -1, true return compareGreater, true
} }
if uint16obj1 == uint16obj2 { if uint16obj1 == uint16obj2 {
return 0, true return compareEqual, true
} }
if uint16obj1 < uint16obj2 { if uint16obj1 < uint16obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Uint32: case reflect.Uint32:
@ -124,13 +132,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint32obj1 := obj1.(uint32) uint32obj1 := obj1.(uint32)
uint32obj2 := obj2.(uint32) uint32obj2 := obj2.(uint32)
if uint32obj1 > uint32obj2 { if uint32obj1 > uint32obj2 {
return -1, true return compareGreater, true
} }
if uint32obj1 == uint32obj2 { if uint32obj1 == uint32obj2 {
return 0, true return compareEqual, true
} }
if uint32obj1 < uint32obj2 { if uint32obj1 < uint32obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Uint64: case reflect.Uint64:
@ -138,13 +146,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint64obj1 := obj1.(uint64) uint64obj1 := obj1.(uint64)
uint64obj2 := obj2.(uint64) uint64obj2 := obj2.(uint64)
if uint64obj1 > uint64obj2 { if uint64obj1 > uint64obj2 {
return -1, true return compareGreater, true
} }
if uint64obj1 == uint64obj2 { if uint64obj1 == uint64obj2 {
return 0, true return compareEqual, true
} }
if uint64obj1 < uint64obj2 { if uint64obj1 < uint64obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Float32: case reflect.Float32:
@ -152,13 +160,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
float32obj1 := obj1.(float32) float32obj1 := obj1.(float32)
float32obj2 := obj2.(float32) float32obj2 := obj2.(float32)
if float32obj1 > float32obj2 { if float32obj1 > float32obj2 {
return -1, true return compareGreater, true
} }
if float32obj1 == float32obj2 { if float32obj1 == float32obj2 {
return 0, true return compareEqual, true
} }
if float32obj1 < float32obj2 { if float32obj1 < float32obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.Float64: case reflect.Float64:
@ -166,13 +174,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
float64obj1 := obj1.(float64) float64obj1 := obj1.(float64)
float64obj2 := obj2.(float64) float64obj2 := obj2.(float64)
if float64obj1 > float64obj2 { if float64obj1 > float64obj2 {
return -1, true return compareGreater, true
} }
if float64obj1 == float64obj2 { if float64obj1 == float64obj2 {
return 0, true return compareEqual, true
} }
if float64obj1 < float64obj2 { if float64obj1 < float64obj2 {
return 1, true return compareLess, true
} }
} }
case reflect.String: case reflect.String:
@ -180,18 +188,18 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
stringobj1 := obj1.(string) stringobj1 := obj1.(string)
stringobj2 := obj2.(string) stringobj2 := obj2.(string)
if stringobj1 > stringobj2 { if stringobj1 > stringobj2 {
return -1, true return compareGreater, true
} }
if stringobj1 == stringobj2 { if stringobj1 == stringobj2 {
return 0, true return compareEqual, true
} }
if stringobj1 < stringobj2 { if stringobj1 < stringobj2 {
return 1, true return compareLess, true
} }
} }
} }
return 0, false return compareEqual, false
} }
// Greater asserts that the first element is greater than the second // Greater asserts that the first element is greater than the second
@ -200,26 +208,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
// assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a") // assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok { return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
h.Helper()
}
e1Kind := reflect.ValueOf(e1).Kind()
e2Kind := reflect.ValueOf(e2).Kind()
if e1Kind != e2Kind {
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
res, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
if res != -1 {
return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...)
}
return true
} }
// GreaterOrEqual asserts that the first element is greater than or equal to the second // GreaterOrEqual asserts that the first element is greater than or equal to the second
@ -229,26 +218,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b") // assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok { return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
h.Helper()
}
e1Kind := reflect.ValueOf(e1).Kind()
e2Kind := reflect.ValueOf(e2).Kind()
if e1Kind != e2Kind {
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
res, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
if res != -1 && res != 0 {
return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...)
}
return true
} }
// Less asserts that the first element is less than the second // Less asserts that the first element is less than the second
@ -257,26 +227,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// assert.Less(t, float64(1), float64(2)) // assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b") // assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok { return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
h.Helper()
}
e1Kind := reflect.ValueOf(e1).Kind()
e2Kind := reflect.ValueOf(e2).Kind()
if e1Kind != e2Kind {
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
res, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
if res != 1 {
return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...)
}
return true
} }
// LessOrEqual asserts that the first element is less than or equal to the second // LessOrEqual asserts that the first element is less than or equal to the second
@ -286,6 +237,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b") // assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
@ -296,14 +251,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
return Fail(t, "Elements should be the same type", msgAndArgs...) return Fail(t, "Elements should be the same type", msgAndArgs...)
} }
res, isComparable := compare(e1, e2, e1Kind) compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable { if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
} }
if res != 1 && res != 0 { if !containsValue(allowedComparesResults, compareResult) {
return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
} }
return true return true
} }
func containsValue(values []CompareType, value CompareType) bool {
for _, v := range values {
if v == value {
return true
}
}
return false
}
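Reviewer note: Greater, GreaterOrEqual, Less, and LessOrEqual now all funnel through compareTwoValues, but their call sites are unchanged. A throwaway test, not from the diff, just to show the unchanged surface:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrdering(t *testing.T) {
	assert.Greater(t, 2, 1)
	assert.GreaterOrEqual(t, "b", "b")
	assert.Less(t, float64(1), float64(2))
	// Mismatched kinds still fail with "Elements should be the same type".
	assert.LessOrEqual(t, uint8(1), uint8(3))
}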
@ -32,7 +32,8 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args
return Contains(t, s, contains, append([]interface{}{msg}, args...)...) return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
} }
// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. // DirExistsf checks whether a directory exists in the given path. It also fails
// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -92,7 +93,7 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args
// EqualValuesf asserts that two objects are equal or convertable to the same types // EqualValuesf asserts that two objects are equal or convertable to the same types
// and equal. // and equal.
// //
// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -126,7 +127,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// Exactlyf asserts that two objects are equal in value and type. // Exactlyf asserts that two objects are equal in value and type.
// //
// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) // assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -160,7 +161,8 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
return False(t, value, append([]interface{}{msg}, args...)...) return False(t, value, append([]interface{}{msg}, args...)...)
} }
// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. // FileExistsf checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -171,7 +173,7 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool
// Greaterf asserts that the first element is greater than the second // Greaterf asserts that the first element is greater than the second
// //
// assert.Greaterf(t, 2, 1, "error message %s", "formatted") // assert.Greaterf(t, 2, 1, "error message %s", "formatted")
// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) // assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted") // assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -223,7 +225,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u
// //
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// //
// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). // Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -235,7 +237,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string,
// //
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// //
// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). // Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -243,6 +245,18 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
} }
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
}
// HTTPSuccessf asserts that a specified handler returns a success status code. // HTTPSuccessf asserts that a specified handler returns a success status code.
// //
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
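Reviewer note: HTTPStatusCodef (and its unformatted counterpart HTTPStatusCode) is new in this testify release. A sketch following the doc comment above, with a stand-in handler that always answers 501:

package example

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotImplemented(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotImplemented)
	}
	assert.HTTPStatusCodef(t, handler, "GET", "/notImplemented", nil, 501,
		"unexpected status for %s", "/notImplemented")
}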
@ -257,7 +271,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin
// Implementsf asserts that an object is implemented by the specified interface. // Implementsf asserts that an object is implemented by the specified interface.
// //
// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) // assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -267,7 +281,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDeltaf asserts that the two numerals are within delta of each other. // InDeltaf asserts that the two numerals are within delta of each other.
// //
// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) // assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -325,14 +339,6 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Lenf asserts that the specified object has specific length. // Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() not accept. // Lenf also fails if the object has a type that len() not accept.
// //
@ -347,7 +353,7 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
// Lessf asserts that the first element is less than the second // Lessf asserts that the first element is less than the second
// //
// assert.Lessf(t, 1, 2, "error message %s", "formatted") // assert.Lessf(t, 1, 2, "error message %s", "formatted")
// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) // assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted") // assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -369,6 +375,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
} }
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
}
// Nilf asserts that the specified object is nil. // Nilf asserts that the specified object is nil.
// //
// assert.Nilf(t, err, "error message %s", "formatted") // assert.Nilf(t, err, "error message %s", "formatted")
@ -379,6 +396,15 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool
return Nil(t, object, append([]interface{}{msg}, args...)...) return Nil(t, object, append([]interface{}{msg}, args...)...)
} }
// NoDirExistsf checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
}
// NoErrorf asserts that a function returned no error (i.e. `nil`). // NoErrorf asserts that a function returned no error (i.e. `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -392,6 +418,15 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
return NoError(t, err, append([]interface{}{msg}, args...)...) return NoError(t, err, append([]interface{}{msg}, args...)...)
} }
// NoFileExistsf checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
}
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element. // specified substring or element.
// //
@ -431,6 +466,16 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
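Hypothetical sketch contrasting NotEqualValuesf with value conversion; the counter values are invented.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCountersDiffer(t *testing.T) {
	// EqualValues would treat uint32(123) and int32(123) as equal after conversion,
	// so NotEqualValuesf only passes when the converted values really differ.
	assert.NotEqualValuesf(t, uint32(123), int32(124), "unexpected counter match in %s", t.Name())
}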
// NotNilf asserts that the specified object is not nil. // NotNilf asserts that the specified object is not nil.
// //
// assert.NotNilf(t, err, "error message %s", "formatted") // assert.NotNilf(t, err, "error message %s", "formatted")
@ -453,7 +498,7 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo
// NotRegexpf asserts that a specified regexp does not match a string. // NotRegexpf asserts that a specified regexp does not match a string.
// //
// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -462,6 +507,19 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
} }
// NotSamef asserts that two pointers do not reference the same object.
//
// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
}
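Hypothetical sketch of NotSamef; the config type and field are placeholders.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type config struct{ name string }

func TestCloneIsDistinctPointer(t *testing.T) {
	original := &config{name: "a"}
	clone := &config{name: "a"} // equal contents, different allocation
	assert.NotSamef(t, original, clone, "clone of %q must not alias the original", original.name)
}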
// NotSubsetf asserts that the specified list(array, slice...) contains not all // NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...). // elements given in the specified subset(array, slice...).
// //
@ -491,6 +549,18 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool
return Panics(t, f, append([]interface{}{msg}, args...)...) return Panics(t, f, append([]interface{}{msg}, args...)...)
} }
// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
}
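Hypothetical sketch of PanicsWithErrorf; mustBePositive and its message are invented for illustration.

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func mustBePositive(n int) {
	if n <= 0 {
		panic(errors.New("value must be positive"))
	}
}

func TestMustBePositivePanics(t *testing.T) {
	// The recovered value must be an error whose Error() equals the given string.
	assert.PanicsWithErrorf(t, "value must be positive", func() { mustBePositive(-1) },
		"unexpected panic behaviour for input %d", -1)
}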
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value. // the recovered panic value equals the expected panic value.
// //
@ -504,7 +574,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
// Regexpf asserts that a specified regexp matches a string. // Regexpf asserts that a specified regexp matches a string.
// //
// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -557,6 +627,14 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
} }
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
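Hypothetical sketch of YAMLEqf with two invented but semantically identical YAML documents.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestRenderedConfig(t *testing.T) {
	expected := "a: 1\nb: [x, y]\n"
	actual := "b:\n  - x\n  - y\na: 1\n"
	// Key order and flow vs. block style are irrelevant; the decoded documents must match.
	assert.YAMLEqf(t, expected, actual, "rendered config differs in %s", t.Name())
}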
// Zerof asserts that i is the zero value for its type. // Zerof asserts that i is the zero value for its type.
func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -53,7 +53,8 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string,
return Containsf(a.t, s, contains, msg, args...) return Containsf(a.t, s, contains, msg, args...)
} }
// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. // DirExists checks whether a directory exists in the given path. It also fails
// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -61,7 +62,8 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
return DirExists(a.t, path, msgAndArgs...) return DirExists(a.t, path, msgAndArgs...)
} }
// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. // DirExistsf checks whether a directory exists in the given path. It also fails
// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -167,7 +169,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
// EqualValuesf asserts that two objects are equal or convertable to the same types // EqualValuesf asserts that two objects are equal or convertable to the same types
// and equal. // and equal.
// //
// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -249,7 +251,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
// Exactlyf asserts that two objects are equal in value and type. // Exactlyf asserts that two objects are equal in value and type.
// //
// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) // a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -309,7 +311,8 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
return Falsef(a.t, value, msg, args...) return Falsef(a.t, value, msg, args...)
} }
// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. // FileExists checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -317,7 +320,8 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
return FileExists(a.t, path, msgAndArgs...) return FileExists(a.t, path, msgAndArgs...)
} }
// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. // FileExistsf checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -366,7 +370,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string,
// Greaterf asserts that the first element is greater than the second // Greaterf asserts that the first element is greater than the second
// //
// a.Greaterf(2, 1, "error message %s", "formatted") // a.Greaterf(2, 1, "error message %s", "formatted")
// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) // a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted") // a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -443,7 +447,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
// //
// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// //
// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). // Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -467,7 +471,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
// //
// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// //
// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). // Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -475,6 +479,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url
return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
} }
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
}
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
}
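Hypothetical sketch of the forward-style HTTPStatusCode/HTTPStatusCodef methods on an Assertions value; the handler and route are placeholders.

package example

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func notImplemented(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusNotImplemented)
}

func TestNotImplementedEndpoint(t *testing.T) {
	a := assert.New(t) // forward-style Assertions bound to t
	a.HTTPStatusCode(notImplemented, "GET", "/notImplemented", nil, http.StatusNotImplemented)
	a.HTTPStatusCodef(notImplemented, "GET", "/notImplemented", nil, 501, "handler %s misbehaved", "notImplemented")
}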
// HTTPSuccess asserts that a specified handler returns a success status code. // HTTPSuccess asserts that a specified handler returns a success status code.
// //
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@ -511,7 +539,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
// Implementsf asserts that an object is implemented by the specified interface. // Implementsf asserts that an object is implemented by the specified interface.
// //
// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) // a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -521,7 +549,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}
// InDelta asserts that the two numerals are within delta of each other. // InDelta asserts that the two numerals are within delta of each other.
// //
// a.InDelta(math.Pi, (22 / 7.0), 0.01) // a.InDelta(math.Pi, 22/7.0, 0.01)
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -563,7 +591,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del
// InDeltaf asserts that the two numerals are within delta of each other. // InDeltaf asserts that the two numerals are within delta of each other.
// //
// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) // a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -639,22 +667,6 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
return JSONEqf(a.t, expected, actual, msg, args...) return JSONEqf(a.t, expected, actual, msg, args...)
} }
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return YAMLEq(a.t, expected, actual, msgAndArgs...)
}
// YAMLEqf asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return YAMLEqf(a.t, expected, actual, msg, args...)
}
// Len asserts that the specified object has specific length. // Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept. // Len also fails if the object has a type that len() not accept.
// //
@ -718,7 +730,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar
// Lessf asserts that the first element is less than the second // Lessf asserts that the first element is less than the second
// //
// a.Lessf(1, 2, "error message %s", "formatted") // a.Lessf(1, 2, "error message %s", "formatted")
// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) // a.Lessf(float64(1), float64(2), "error message %s", "formatted")
// a.Lessf("a", "b", "error message %s", "formatted") // a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -727,6 +739,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
return Lessf(a.t, e1, e2, msg, args...) return Lessf(a.t, e1, e2, msg, args...)
} }
// Never asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Never(a.t, condition, waitFor, tick, msgAndArgs...)
}
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Neverf(a.t, condition, waitFor, tick, msg, args...)
}
// Nil asserts that the specified object is nil. // Nil asserts that the specified object is nil.
// //
// a.Nil(err) // a.Nil(err)
@ -747,6 +781,24 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) b
return Nilf(a.t, object, msg, args...) return Nilf(a.t, object, msg, args...)
} }
// NoDirExists checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NoDirExists(a.t, path, msgAndArgs...)
}
// NoDirExistsf checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NoDirExistsf(a.t, path, msg, args...)
}
// NoError asserts that a function returned no error (i.e. `nil`). // NoError asserts that a function returned no error (i.e. `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -773,6 +825,24 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
return NoErrorf(a.t, err, msg, args...) return NoErrorf(a.t, err, msg, args...)
} }
// NoFileExists checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NoFileExists(a.t, path, msgAndArgs...)
}
// NoFileExistsf checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NoFileExistsf(a.t, path, msg, args...)
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element. // specified substring or element.
// //
@ -838,6 +908,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
return NotEqual(a.t, expected, actual, msgAndArgs...) return NotEqual(a.t, expected, actual, msgAndArgs...)
} }
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
// a.NotEqualValues(obj1, obj2)
func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotEqualValues(a.t, expected, actual, msgAndArgs...)
}
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotEqualValuesf(a.t, expected, actual, msg, args...)
}
// NotEqualf asserts that the specified values are NOT equal. // NotEqualf asserts that the specified values are NOT equal.
// //
// a.NotEqualf(obj1, obj2, "error message %s", "formatted") // a.NotEqualf(obj1, obj2, "error message %s", "formatted")
@ -904,7 +994,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
// NotRegexpf asserts that a specified regexp does not match a string. // NotRegexpf asserts that a specified regexp does not match a string.
// //
// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") // a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -913,6 +1003,32 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg
return NotRegexpf(a.t, rx, str, msg, args...) return NotRegexpf(a.t, rx, str, msg, args...)
} }
// NotSame asserts that two pointers do not reference the same object.
//
// a.NotSame(ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotSame(a.t, expected, actual, msgAndArgs...)
}
// NotSamef asserts that two pointers do not reference the same object.
//
// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotSamef(a.t, expected, actual, msg, args...)
}
// NotSubset asserts that the specified list(array, slice...) contains not all // NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...). // elements given in the specified subset(array, slice...).
// //
@ -961,6 +1077,30 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...) return Panics(a.t, f, msgAndArgs...)
} }
// PanicsWithError asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// a.PanicsWithError("crazy error", func(){ GoCrazy() })
func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return PanicsWithError(a.t, errString, f, msgAndArgs...)
}
// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return PanicsWithErrorf(a.t, errString, f, msg, args...)
}
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value. // the recovered panic value equals the expected panic value.
// //
@ -1006,7 +1146,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
// Regexpf asserts that a specified regexp matches a string. // Regexpf asserts that a specified regexp matches a string.
// //
// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") // a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -1103,6 +1243,22 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
return WithinDurationf(a.t, expected, actual, delta, msg, args...) return WithinDurationf(a.t, expected, actual, delta, msg, args...)
} }
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return YAMLEq(a.t, expected, actual, msgAndArgs...)
}
// YAMLEqf asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return YAMLEqf(a.t, expected, actual, msg, args...)
}
// Zero asserts that i is the zero value for its type. // Zero asserts that i is the zero value for its type.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -11,6 +11,7 @@ import (
"reflect" "reflect"
"regexp" "regexp"
"runtime" "runtime"
"runtime/debug"
"strings" "strings"
"time" "time"
"unicode" "unicode"
@ -18,10 +19,10 @@ import (
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib" "github.com/pmezard/go-difflib/difflib"
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v3"
) )
//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
// TestingT is an interface wrapper around *testing.T // TestingT is an interface wrapper around *testing.T
type TestingT interface { type TestingT interface {
@ -44,7 +45,7 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
// for table driven tests. // for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
// Comparison a custom function that returns true on success and false on failure // Comparison is a custom function that returns true on success and false on failure
type Comparison func() (success bool) type Comparison func() (success bool)
/* /*
@ -103,11 +104,11 @@ the problem actually occurred in calling code.*/
// failed. // failed.
func CallerInfo() []string { func CallerInfo() []string {
pc := uintptr(0) var pc uintptr
file := "" var ok bool
line := 0 var file string
ok := false var line int
name := "" var name string
callers := []string{} callers := []string{}
for i := 0; ; i++ { for i := 0; ; i++ {
@ -351,6 +352,19 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
} }
// validateEqualArgs checks whether provided arguments can be safely used in the
// Equal/NotEqual functions.
func validateEqualArgs(expected, actual interface{}) error {
if expected == nil && actual == nil {
return nil
}
if isFunction(expected) || isFunction(actual) {
return errors.New("cannot take func type as argument")
}
return nil
}
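A small hypothetical sketch of the behaviour this guard enables: comparing two nils is now allowed, while func-typed arguments are still rejected with "cannot take func type as argument".

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNilComparesEqual(t *testing.T) {
	var err error
	// Both sides nil short-circuits validateEqualArgs and compares equal.
	assert.Equal(t, nil, err)
}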
// Same asserts that two pointers reference the same object. // Same asserts that two pointers reference the same object.
// //
// assert.Same(t, ptr1, ptr2) // assert.Same(t, ptr1, ptr2)
@ -362,18 +376,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
h.Helper() h.Helper()
} }
expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual) if !samePointers(expected, actual) {
if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr {
return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...)
}
expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual)
if expectedType != actualType {
return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v",
expectedType, actualType), msgAndArgs...)
}
if expected != actual {
return Fail(t, fmt.Sprintf("Not same: \n"+ return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+ "expected: %p %#v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
@ -382,6 +385,42 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
return true return true
} }
// NotSame asserts that two pointers do not reference the same object.
//
// assert.NotSame(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if samePointers(expected, actual) {
return Fail(t, fmt.Sprintf(
"Expected and actual point to the same object: %p %#v",
expected, expected), msgAndArgs...)
}
return true
}
// samePointers compares two generic interface objects and returns whether
// they point to the same object
func samePointers(first, second interface{}) bool {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
return false
}
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
if firstType != secondType {
return false
}
// compare pointer addresses
return first == second
}
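Hypothetical sketch of the samePointers semantics as seen through Same and NotSame; the buffer variables are placeholders.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestBufferAliasing(t *testing.T) {
	buf := new([]byte)
	alias := buf // same type and same address
	assert.Same(t, buf, alias)
	assert.NotSame(t, buf, new([]byte)) // distinct allocation of the same type
}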
// formatUnequalValues takes two values of arbitrary types and returns string // formatUnequalValues takes two values of arbitrary types and returns string
// representations appropriate to be presented to the user. // representations appropriate to be presented to the user.
// //
@ -390,12 +429,27 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
// to a type conversion in the Go grammar. // to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) { func formatUnequalValues(expected, actual interface{}) (e string, a string) {
if reflect.TypeOf(expected) != reflect.TypeOf(actual) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
return fmt.Sprintf("%T(%#v)", expected, expected), return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
fmt.Sprintf("%T(%#v)", actual, actual) fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
}
switch expected.(type) {
case time.Duration:
return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
}
return truncatingFormat(expected), truncatingFormat(actual)
} }
return fmt.Sprintf("%#v", expected), // truncatingFormat formats the data and truncates it if it's too long.
fmt.Sprintf("%#v", actual) //
// This helps keep formatted error messages lines from exceeding the
// bufio.MaxScanTokenSize max line length that the go testing framework imposes.
func truncatingFormat(data interface{}) string {
value := fmt.Sprintf("%#v", data)
max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed.
if len(value) > max {
value = value[0:max] + "<... truncated>"
}
return value
} }
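A standalone sketch of the same truncation idea (truncate and Example_truncate are invented names, not part of the library).

package example

import (
	"bufio"
	"fmt"
	"strings"
)

// truncate mirrors the idea behind truncatingFormat: cap a %#v rendering so a single
// failure-message line stays below bufio.MaxScanTokenSize.
func truncate(v interface{}) string {
	s := fmt.Sprintf("%#v", v)
	max := bufio.MaxScanTokenSize - 100 // leave room for the type prefix
	if len(s) > max {
		return s[:max] + "<... truncated>"
	}
	return s
}

func Example_truncate() {
	long := strings.Repeat("x", bufio.MaxScanTokenSize)
	fmt.Println(len(truncate(long)) < bufio.MaxScanTokenSize)
	// Output: true
}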
// EqualValues asserts that two objects are equal or convertable to the same types // EqualValues asserts that two objects are equal or convertable to the same types
@ -442,12 +496,12 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
// //
// assert.NotNil(t, err) // assert.NotNil(t, err)
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if !isNil(object) { if !isNil(object) {
return true return true
} }
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Fail(t, "Expected value not to be nil.", msgAndArgs...) return Fail(t, "Expected value not to be nil.", msgAndArgs...)
} }
@ -488,12 +542,12 @@ func isNil(object interface{}) bool {
// //
// assert.Nil(t, err) // assert.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if isNil(object) { if isNil(object) {
return true return true
} }
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
} }
@ -530,12 +584,11 @@ func isEmpty(object interface{}) bool {
// //
// assert.Empty(t, obj) // assert.Empty(t, obj)
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := isEmpty(object)
if !pass {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
pass := isEmpty(object)
if !pass {
Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
} }
@ -550,12 +603,11 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
// } // }
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := !isEmpty(object)
if !pass {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
pass := !isEmpty(object)
if !pass {
Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
} }
@ -598,16 +650,10 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
// //
// assert.True(t, myBool) // assert.True(t, myBool)
func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
if !value {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
if h, ok := t.(interface {
Helper()
}); ok {
h.Helper()
}
if value != true {
return Fail(t, "Should be true", msgAndArgs...) return Fail(t, "Should be true", msgAndArgs...)
} }
@ -619,11 +665,10 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
// //
// assert.False(t, myBool) // assert.False(t, myBool)
func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
if value {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
if value != false {
return Fail(t, "Should be false", msgAndArgs...) return Fail(t, "Should be false", msgAndArgs...)
} }
@ -654,6 +699,21 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
} }
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
// assert.NotEqualValues(t, obj1, obj2)
func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if ObjectsAreEqualValues(expected, actual) {
return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
}
return true
}
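Hypothetical sketch contrasting NotEqual (type-sensitive) with NotEqualValues (conversion-aware); the numbers are arbitrary.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotEqualVariants(t *testing.T) {
	assert.NotEqual(t, int32(5), int64(5))       // passes: the static types differ
	assert.NotEqualValues(t, int32(5), int64(6)) // passes: the values differ even after conversion
}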
// containsElement try loop over the list check if the list includes the element. // containsElement try loop over the list check if the list includes the element.
// return (false, false) if impossible. // return (false, false) if impossible.
// return (true, false) if element was not found. // return (true, false) if element was not found.
@ -706,10 +766,10 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
ok, found := includeElement(s, contains) ok, found := includeElement(s, contains)
if !ok { if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
} }
if !found { if !found {
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
} }
return true return true
@ -840,27 +900,39 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
return true return true
} }
aKind := reflect.TypeOf(listA).Kind() if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
bKind := reflect.TypeOf(listB).Kind() return false
if aKind != reflect.Array && aKind != reflect.Slice {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
} }
if bKind != reflect.Array && bKind != reflect.Slice { extraA, extraB := diffLists(listA, listB)
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
if len(extraA) == 0 && len(extraB) == 0 {
return true
} }
return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
}
// isList checks that the provided value is array or slice.
func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
kind := reflect.TypeOf(list).Kind()
if kind != reflect.Array && kind != reflect.Slice {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
msgAndArgs...)
}
return true
}
// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
aValue := reflect.ValueOf(listA) aValue := reflect.ValueOf(listA)
bValue := reflect.ValueOf(listB) bValue := reflect.ValueOf(listB)
aLen := aValue.Len() aLen := aValue.Len()
bLen := bValue.Len() bLen := bValue.Len()
if aLen != bLen {
return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
}
// Mark indexes in bValue that we already used // Mark indexes in bValue that we already used
visited := make([]bool, bLen) visited := make([]bool, bLen)
for i := 0; i < aLen; i++ { for i := 0; i < aLen; i++ {
@ -877,11 +949,38 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
} }
} }
if !found { if !found {
return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) extraA = append(extraA, element)
} }
} }
return true for j := 0; j < bLen; j++ {
if visited[j] {
continue
}
extraB = append(extraB, bValue.Index(j).Interface())
}
return
}
func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
var msg bytes.Buffer
msg.WriteString("elements differ")
if len(extraA) > 0 {
msg.WriteString("\n\nextra elements in list A:\n")
msg.WriteString(spewConfig.Sdump(extraA))
}
if len(extraB) > 0 {
msg.WriteString("\n\nextra elements in list B:\n")
msg.WriteString(spewConfig.Sdump(extraB))
}
msg.WriteString("\n\nlistA:\n")
msg.WriteString(spewConfig.Sdump(listA))
msg.WriteString("\n\nlistB:\n")
msg.WriteString(spewConfig.Sdump(listB))
return msg.String()
} }
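Hypothetical sketch of ElementsMatch, whose failure output is now built from diffLists/formatListDiff; the node names are placeholders.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPeerListIgnoresOrder(t *testing.T) {
	want := []string{"node-a", "node-b", "node-b"}
	got := []string{"node-b", "node-a", "node-b"}
	// Order is ignored but multiplicity is not: any surplus on either side would be
	// reported as "extra elements in list A" or "extra elements in list B".
	assert.ElementsMatch(t, want, got)
}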
// Condition uses a Comparison to assert a complex condition. // Condition uses a Comparison to assert a complex condition.
@ -901,15 +1000,17 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
type PanicTestFunc func() type PanicTestFunc func()
// didPanic returns true if the function passed to it panics. Otherwise, it returns false. // didPanic returns true if the function passed to it panics. Otherwise, it returns false.
func didPanic(f PanicTestFunc) (bool, interface{}) { func didPanic(f PanicTestFunc) (bool, interface{}, string) {
didPanic := false didPanic := false
var message interface{} var message interface{}
var stack string
func() { func() {
defer func() { defer func() {
if message = recover(); message != nil { if message = recover(); message != nil {
didPanic = true didPanic = true
stack = string(debug.Stack())
} }
}() }()
@ -918,7 +1019,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}) {
}() }()
return didPanic, message return didPanic, message, stack
} }
@ -930,7 +1031,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
h.Helper() h.Helper()
} }
if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
} }
@ -946,12 +1047,34 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr
h.Helper() h.Helper()
} }
funcDidPanic, panicValue := didPanic(f) funcDidPanic, panicValue, panickedStack := didPanic(f)
if !funcDidPanic { if !funcDidPanic {
return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
} }
if panicValue != expected { if panicValue != expected {
return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
}
return true
}
// PanicsWithError asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
funcDidPanic, panicValue, panickedStack := didPanic(f)
if !funcDidPanic {
return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
}
panicErr, ok := panicValue.(error)
if !ok || panicErr.Error() != errString {
return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
} }
return true return true
@ -965,8 +1088,8 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
h.Helper() h.Helper()
} }
if funcDidPanic, panicValue := didPanic(f); funcDidPanic { if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
} }
return true return true
@ -993,6 +1116,8 @@ func toFloat(x interface{}) (float64, bool) {
xok := true xok := true
switch xn := x.(type) { switch xn := x.(type) {
case uint:
xf = float64(xn)
case uint8: case uint8:
xf = float64(xn) xf = float64(xn)
case uint16: case uint16:
@ -1014,7 +1139,7 @@ func toFloat(x interface{}) (float64, bool) {
case float32: case float32:
xf = float64(xn) xf = float64(xn)
case float64: case float64:
xf = float64(xn) xf = xn
case time.Duration: case time.Duration:
xf = float64(xn) xf = float64(xn)
default: default:
@ -1026,7 +1151,7 @@ func toFloat(x interface{}) (float64, bool) {
// InDelta asserts that the two numerals are within delta of each other. // InDelta asserts that the two numerals are within delta of each other.
// //
// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) // assert.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1128,6 +1253,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if !aok { if !aok {
return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
} }
if math.IsNaN(af) {
return 0, errors.New("expected value must not be NaN")
}
if af == 0 { if af == 0 {
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
} }
@ -1135,6 +1263,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if !bok { if !bok {
return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
} }
if math.IsNaN(bf) {
return 0, errors.New("actual value must not be NaN")
}
return math.Abs(af-bf) / math.Abs(af), nil return math.Abs(af-bf) / math.Abs(af), nil
} }
@ -1144,6 +1275,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
if math.IsNaN(epsilon) {
return Fail(t, "epsilon must not be NaN")
}
actualEpsilon, err := calcRelativeError(expected, actual) actualEpsilon, err := calcRelativeError(expected, actual)
if err != nil { if err != nil {
return Fail(t, err.Error(), msgAndArgs...) return Fail(t, err.Error(), msgAndArgs...)
@ -1191,10 +1325,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
// assert.Equal(t, expectedObj, actualObj) // assert.Equal(t, expectedObj, actualObj)
// } // }
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err != nil {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
if err != nil {
return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
} }
@ -1208,11 +1342,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// assert.Equal(t, expectedError, err) // assert.Equal(t, expectedError, err)
// } // }
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err == nil {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
if err == nil {
return Fail(t, "An error is expected but got nil.", msgAndArgs...) return Fail(t, "An error is expected but got nil.", msgAndArgs...)
} }
@ -1314,7 +1447,8 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
return true return true
} }
// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. // FileExists checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1332,7 +1466,24 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
return true return true
} }
// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. // NoFileExists checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
info, err := os.Lstat(path)
if err != nil {
return true
}
if info.IsDir() {
return true
}
return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
}
// DirExists checks whether a directory exists in the given path. It also fails
// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1350,6 +1501,25 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
return true return true
} }
// NoDirExists checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
info, err := os.Lstat(path)
if err != nil {
if os.IsNotExist(err) {
return true
}
return true
}
if !info.IsDir() {
return true
}
return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
}
// JSONEq asserts that two JSON strings are equivalent. // JSONEq asserts that two JSON strings are equivalent.
// //
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) // assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
@ -1439,15 +1609,6 @@ func diff(expected interface{}, actual interface{}) string {
return "\n\nDiff:\n" + diff return "\n\nDiff:\n" + diff
} }
// validateEqualArgs checks whether provided arguments can be safely used in the
// Equal/NotEqual functions.
func validateEqualArgs(expected, actual interface{}) error {
if isFunction(expected) || isFunction(actual) {
return errors.New("cannot take func type as argument")
}
return nil
}
func isFunction(arg interface{}) bool { func isFunction(arg interface{}) bool {
if arg == nil { if arg == nil {
return false return false
@ -1460,6 +1621,7 @@ var spewConfig = spew.ConfigState{
DisablePointerAddresses: true, DisablePointerAddresses: true,
DisableCapacities: true, DisableCapacities: true,
SortKeys: true, SortKeys: true,
DisableMethods: true,
} }
type tHelper interface { type tHelper interface {
@ -1475,24 +1637,59 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
h.Helper() h.Helper()
} }
ch := make(chan bool, 1)
timer := time.NewTimer(waitFor) timer := time.NewTimer(waitFor)
ticker := time.NewTicker(tick)
checkPassed := make(chan bool)
defer timer.Stop() defer timer.Stop()
ticker := time.NewTicker(tick)
defer ticker.Stop() defer ticker.Stop()
defer close(checkPassed)
for { for tick := ticker.C; ; {
select { select {
case <-timer.C: case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...) return Fail(t, "Condition never satisfied", msgAndArgs...)
case result := <-checkPassed: case <-tick:
if result { tick = nil
go func() { ch <- condition() }()
case v := <-ch:
if v {
return true return true
} }
case <-ticker.C: tick = ticker.C
go func() { }
checkPassed <- condition() }
}() }
// Never asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
ch := make(chan bool, 1)
timer := time.NewTimer(waitFor)
defer timer.Stop()
ticker := time.NewTicker(tick)
defer ticker.Stop()
for tick := ticker.C; ; {
select {
case <-timer.C:
return true
case <-tick:
tick = nil
go func() { ch <- condition() }()
case v := <-ch:
if v {
return Fail(t, "Condition satisfied", msgAndArgs...)
}
tick = ticker.C
} }
} }
} }
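Hypothetical sketch of the polling pair Eventually and Never; the ready counter and sleep durations are invented, and atomic access keeps the goroutine-run condition race-free.

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestPolling(t *testing.T) {
	var ready int32
	go func() {
		time.Sleep(20 * time.Millisecond)
		atomic.StoreInt32(&ready, 1)
	}()
	// Eventually polls the condition every tick until it holds or waitFor expires;
	// Never is the mirror image and fails as soon as the condition reports true.
	assert.Eventually(t, func() bool { return atomic.LoadInt32(&ready) == 1 }, time.Second, 10*time.Millisecond)
	assert.Never(t, func() bool { return atomic.LoadInt32(&ready) == 2 }, 100*time.Millisecond, 10*time.Millisecond)
}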
@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
} }
} }
//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
@ -33,7 +33,6 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
code, err := httpCode(handler, method, url, values) code, err := httpCode(handler, method, url, values)
if err != nil { if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
} }
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
@ -56,7 +55,6 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
code, err := httpCode(handler, method, url, values) code, err := httpCode(handler, method, url, values)
if err != nil { if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
} }
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
@ -79,7 +77,6 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
code, err := httpCode(handler, method, url, values) code, err := httpCode(handler, method, url, values)
if err != nil { if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
} }
isErrorCode := code >= http.StatusBadRequest isErrorCode := code >= http.StatusBadRequest
@ -90,6 +87,28 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
return isErrorCode return isErrorCode
} }
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
successful := code == statuscode
if !successful {
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
}
return successful
}
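A short usage sketch for the new HTTPStatusCode assertion; the handler and test name are stand-ins written for this example:

package example

import (
    "net/http"
    "net/url"
    "testing"

    "github.com/stretchr/testify/assert"
)

func notImplemented(w http.ResponseWriter, r *http.Request) {
    w.WriteHeader(http.StatusNotImplemented)
}

func TestNotImplementedRoute(t *testing.T) {
    // Builds a GET /notImplemented request against the handler and asserts
    // that the recorded status code is exactly 501.
    assert.HTTPStatusCode(t, notImplemented, "GET", "/notImplemented", url.Values{}, http.StatusNotImplemented)
}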
// HTTPBody is a helper that returns HTTP body of the response. It returns // HTTPBody is a helper that returns HTTP body of the response. It returns
// empty string if building a new request fails. // empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
}
}
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs"
@ -66,7 +66,8 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args
t.FailNow() t.FailNow()
} }
// DirExists checks whether a directory exists in the given path. It also fails
// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -77,7 +78,8 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
t.FailNow() t.FailNow()
} }
// DirExistsf checks whether a directory exists in the given path. It also fails
// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { func DirExistsf(t TestingT, path string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -210,7 +212,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
// EqualValuesf asserts that two objects are equal or convertable to the same types // EqualValuesf asserts that two objects are equal or convertable to the same types
// and equal. // and equal.
// //
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -275,12 +277,12 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) {
// //
// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) // assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) {
return
}
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) {
return
}
t.FailNow() t.FailNow()
} }
@ -289,12 +291,12 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
// //
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") // assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) {
return
}
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) {
return
}
t.FailNow() t.FailNow()
} }
@ -313,7 +315,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ..
// Exactlyf asserts that two objects are equal in value and type. // Exactlyf asserts that two objects are equal in value and type.
// //
// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -394,7 +396,8 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) {
t.FailNow() t.FailNow()
} }
// FileExists checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -405,7 +408,8 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
t.FailNow() t.FailNow()
} }
// FileExistsf checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { func FileExistsf(t TestingT, path string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -466,7 +470,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg
// Greaterf asserts that the first element is greater than the second // Greaterf asserts that the first element is greater than the second
// //
// assert.Greaterf(t, 2, 1, "error message %s", "formatted") // assert.Greaterf(t, 2, 1, "error message %s", "formatted")
// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted") // assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -561,7 +565,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string,
// //
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// //
// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -591,7 +595,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin
// //
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// //
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -602,6 +606,36 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
t.FailNow() t.FailNow()
} }
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.HTTPStatusCode(t, handler, method, url, values, statuscode, msgAndArgs...) {
return
}
t.FailNow()
}
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.HTTPStatusCodef(t, handler, method, url, values, statuscode, msg, args...) {
return
}
t.FailNow()
}
// HTTPSuccess asserts that a specified handler returns a success status code. // HTTPSuccess asserts that a specified handler returns a success status code.
// //
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
@ -647,7 +681,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
// Implementsf asserts that an object is implemented by the specified interface. // Implementsf asserts that an object is implemented by the specified interface.
// //
// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -660,7 +694,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDelta asserts that the two numerals are within delta of each other. // InDelta asserts that the two numerals are within delta of each other.
// //
// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -717,7 +751,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f
// InDeltaf asserts that the two numerals are within delta of each other. // InDeltaf asserts that the two numerals are within delta of each other.
// //
// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -820,28 +854,6 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
t.FailNow() t.FailNow()
} }
// YAMLEq asserts that two YAML strings are equivalent.
func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.YAMLEq(t, expected, actual, msgAndArgs...) {
return
}
t.FailNow()
}
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.YAMLEqf(t, expected, actual, msg, args...) {
return
}
t.FailNow()
}
// Len asserts that the specified object has specific length. // Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept. // Len also fails if the object has a type that len() not accept.
// //
@ -920,7 +932,7 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
// Lessf asserts that the first element is less than the second // Lessf asserts that the first element is less than the second
// //
// assert.Lessf(t, 1, 2, "error message %s", "formatted") // assert.Lessf(t, 1, 2, "error message %s", "formatted")
// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted") // assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -932,6 +944,34 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
t.FailNow() t.FailNow()
} }
// Never asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.Never(t, condition, waitFor, tick, msgAndArgs...) {
return
}
t.FailNow()
}
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.Neverf(t, condition, waitFor, tick, msg, args...) {
return
}
t.FailNow()
}
// Nil asserts that the specified object is nil. // Nil asserts that the specified object is nil.
// //
// assert.Nil(t, err) // assert.Nil(t, err)
@ -958,6 +998,30 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
t.FailNow() t.FailNow()
} }
// NoDirExists checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NoDirExists(t, path, msgAndArgs...) {
return
}
t.FailNow()
}
// NoDirExistsf checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NoDirExistsf(t, path, msg, args...) {
return
}
t.FailNow()
}
// NoError asserts that a function returned no error (i.e. `nil`). // NoError asserts that a function returned no error (i.e. `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -990,6 +1054,30 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
t.FailNow() t.FailNow()
} }
// NoFileExists checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NoFileExists(t, path, msgAndArgs...) {
return
}
t.FailNow()
}
// NoFileExistsf checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NoFileExistsf(t, path, msg, args...) {
return
}
t.FailNow()
}
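A hedged sketch of the new negative file-system checks at the require level; the paths are illustrative and t.TempDir assumes Go 1.15 or newer:

package example

import (
    "path/filepath"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestNoLeftoverArtifacts(t *testing.T) {
    tmp := t.TempDir()

    // NoFileExists fails only if the path is an existing regular file;
    // NoDirExists fails only if the path is an existing directory.
    require.NoFileExists(t, filepath.Join(tmp, "state.lock"))
    require.NoDirExists(t, filepath.Join(tmp, "cache"))
}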
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element. // specified substring or element.
// //
@ -1070,6 +1158,32 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs .
t.FailNow() t.FailNow()
} }
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
// assert.NotEqualValues(t, obj1, obj2)
func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotEqualValues(t, expected, actual, msgAndArgs...) {
return
}
t.FailNow()
}
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotEqualValuesf(t, expected, actual, msg, args...) {
return
}
t.FailNow()
}
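To illustrate how this differs from NotEqual, a small sketch using the assert-level variants; the require wrappers above behave the same but abort the test on failure:

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestNotEqualValues(t *testing.T) {
    // NotEqual passes because the types differ, even though both values
    // represent the number 5.
    assert.NotEqual(t, int32(5), int64(5))

    // NotEqualValues uses the looser EqualValues comparison, so it would
    // fail for int32(5) vs int64(5); it passes here because the converted
    // values really differ.
    assert.NotEqualValues(t, int32(5), int64(7))
}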
// NotEqualf asserts that the specified values are NOT equal. // NotEqualf asserts that the specified values are NOT equal.
// //
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
@ -1154,7 +1268,7 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
// NotRegexpf asserts that a specified regexp does not match a string. // NotRegexpf asserts that a specified regexp does not match a string.
// //
// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -1166,6 +1280,38 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
t.FailNow() t.FailNow()
} }
// NotSame asserts that two pointers do not reference the same object.
//
// assert.NotSame(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotSame(t, expected, actual, msgAndArgs...) {
return
}
t.FailNow()
}
// NotSamef asserts that two pointers do not reference the same object.
//
// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotSamef(t, expected, actual, msg, args...) {
return
}
t.FailNow()
}
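A minimal sketch contrasting value equality with pointer identity, shown with the assert variants; the require wrappers differ only in calling FailNow:

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

type config struct{ Name string }

func TestNotSame(t *testing.T) {
    a := &config{Name: "x"}
    b := &config{Name: "x"} // equal contents, separate allocation

    // Equal compares the pointed-to values, so these two are "equal" ...
    assert.Equal(t, a, b)
    // ... while NotSame passes because they are distinct pointers.
    assert.NotSame(t, a, b)
}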
// NotSubset asserts that the specified list(array, slice...) contains not all // NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...). // elements given in the specified subset(array, slice...).
// //
@ -1229,6 +1375,36 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
t.FailNow() t.FailNow()
} }
// PanicsWithError asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.PanicsWithError(t, errString, f, msgAndArgs...) {
return
}
t.FailNow()
}
// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.PanicsWithErrorf(t, errString, f, msg, args...) {
return
}
t.FailNow()
}
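A short sketch of the new panic assertion, shown with the assert-level variant; the recovered value must be an error whose message matches the expected string:

package example

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestPanicsWithError(t *testing.T) {
    // Unlike PanicsWithValue, the recovered value must be an error and its
    // message must match the expected string (EqualError semantics).
    assert.PanicsWithError(t, "boom: 42", func() {
        panic(fmt.Errorf("boom: %d", 42))
    })
}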
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value. // the recovered panic value equals the expected panic value.
// //
@ -1286,7 +1462,7 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface
// Regexpf asserts that a specified regexp matches a string. // Regexpf asserts that a specified regexp matches a string.
// //
// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -1410,6 +1586,28 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
t.FailNow() t.FailNow()
} }
// YAMLEq asserts that two YAML strings are equivalent.
func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.YAMLEq(t, expected, actual, msgAndArgs...) {
return
}
t.FailNow()
}
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.YAMLEqf(t, expected, actual, msg, args...) {
return
}
t.FailNow()
}
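A small usage sketch for YAMLEq; the documents are illustrative:

package example

import (
    "testing"

    "github.com/stretchr/testify/require"
)

func TestYAMLEq(t *testing.T) {
    expected := "name: containerd\nreplicas: 2\n"
    actual := "replicas: 2\nname: containerd\n"

    // Equivalence is decided on the parsed documents, so key order and
    // surface formatting do not matter.
    require.YAMLEq(t, expected, actual)
}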
// Zero asserts that i is the zero value for its type. // Zero asserts that i is the zero value for its type.
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -54,7 +54,8 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string,
Containsf(a.t, s, contains, msg, args...) Containsf(a.t, s, contains, msg, args...)
} }
// DirExists checks whether a directory exists in the given path. It also fails
// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -62,7 +63,8 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
DirExists(a.t, path, msgAndArgs...) DirExists(a.t, path, msgAndArgs...)
} }
// DirExistsf checks whether a directory exists in the given path. It also fails
// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) { func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -168,7 +170,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
// EqualValuesf asserts that two objects are equal or convertable to the same types // EqualValuesf asserts that two objects are equal or convertable to the same types
// and equal. // and equal.
// //
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -250,7 +252,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
// Exactlyf asserts that two objects are equal in value and type. // Exactlyf asserts that two objects are equal in value and type.
// //
// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -310,7 +312,8 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) {
Falsef(a.t, value, msg, args...) Falsef(a.t, value, msg, args...)
} }
// FileExists checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -318,7 +321,8 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
FileExists(a.t, path, msgAndArgs...) FileExists(a.t, path, msgAndArgs...)
} }
// FileExistsf checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -367,7 +371,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string,
// Greaterf asserts that the first element is greater than the second // Greaterf asserts that the first element is greater than the second
// //
// a.Greaterf(2, 1, "error message %s", "formatted") // a.Greaterf(2, 1, "error message %s", "formatted")
// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted") // a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -444,7 +448,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
// //
// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// //
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -468,7 +472,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
// //
// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// //
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -476,6 +480,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url
HTTPRedirectf(a.t, handler, method, url, values, msg, args...) HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
} }
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
}
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
}
// HTTPSuccess asserts that a specified handler returns a success status code. // HTTPSuccess asserts that a specified handler returns a success status code.
// //
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@ -512,7 +540,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
// Implementsf asserts that an object is implemented by the specified interface. // Implementsf asserts that an object is implemented by the specified interface.
// //
// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -522,7 +550,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}
// InDelta asserts that the two numerals are within delta of each other. // InDelta asserts that the two numerals are within delta of each other.
// //
// a.InDelta(math.Pi, 22/7.0, 0.01)
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -564,7 +592,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del
// InDeltaf asserts that the two numerals are within delta of each other. // InDeltaf asserts that the two numerals are within delta of each other.
// //
// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -640,22 +668,6 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
JSONEqf(a.t, expected, actual, msg, args...) JSONEqf(a.t, expected, actual, msg, args...)
} }
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
YAMLEq(a.t, expected, actual, msgAndArgs...)
}
// YAMLEqf asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
YAMLEqf(a.t, expected, actual, msg, args...)
}
// Len asserts that the specified object has specific length. // Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept. // Len also fails if the object has a type that len() not accept.
// //
@ -719,7 +731,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar
// Lessf asserts that the first element is less than the second // Lessf asserts that the first element is less than the second
// //
// a.Lessf(1, 2, "error message %s", "formatted") // a.Lessf(1, 2, "error message %s", "formatted")
// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
// a.Lessf("a", "b", "error message %s", "formatted") // a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -728,6 +740,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
Lessf(a.t, e1, e2, msg, args...) Lessf(a.t, e1, e2, msg, args...)
} }
// Never asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Never(a.t, condition, waitFor, tick, msgAndArgs...)
}
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Neverf(a.t, condition, waitFor, tick, msg, args...)
}
// Nil asserts that the specified object is nil. // Nil asserts that the specified object is nil.
// //
// a.Nil(err) // a.Nil(err)
@ -748,6 +782,24 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) {
Nilf(a.t, object, msg, args...) Nilf(a.t, object, msg, args...)
} }
// NoDirExists checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NoDirExists(a.t, path, msgAndArgs...)
}
// NoDirExistsf checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NoDirExistsf(a.t, path, msg, args...)
}
// NoError asserts that a function returned no error (i.e. `nil`). // NoError asserts that a function returned no error (i.e. `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
@ -774,6 +826,24 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) {
NoErrorf(a.t, err, msg, args...) NoErrorf(a.t, err, msg, args...)
} }
// NoFileExists checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NoFileExists(a.t, path, msgAndArgs...)
}
// NoFileExistsf checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NoFileExistsf(a.t, path, msg, args...)
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element. // specified substring or element.
// //
@ -839,6 +909,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
NotEqual(a.t, expected, actual, msgAndArgs...) NotEqual(a.t, expected, actual, msgAndArgs...)
} }
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
// a.NotEqualValues(obj1, obj2)
func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotEqualValues(a.t, expected, actual, msgAndArgs...)
}
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotEqualValuesf(a.t, expected, actual, msg, args...)
}
// NotEqualf asserts that the specified values are NOT equal. // NotEqualf asserts that the specified values are NOT equal.
// //
// a.NotEqualf(obj1, obj2, "error message %s", "formatted") // a.NotEqualf(obj1, obj2, "error message %s", "formatted")
@ -905,7 +995,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
// NotRegexpf asserts that a specified regexp does not match a string. // NotRegexpf asserts that a specified regexp does not match a string.
// //
// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") // a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -914,6 +1004,32 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg
NotRegexpf(a.t, rx, str, msg, args...) NotRegexpf(a.t, rx, str, msg, args...)
} }
// NotSame asserts that two pointers do not reference the same object.
//
// a.NotSame(ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotSame(a.t, expected, actual, msgAndArgs...)
}
// NotSamef asserts that two pointers do not reference the same object.
//
// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotSamef(a.t, expected, actual, msg, args...)
}
// NotSubset asserts that the specified list(array, slice...) contains not all // NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...). // elements given in the specified subset(array, slice...).
// //
@ -962,6 +1078,30 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
Panics(a.t, f, msgAndArgs...) Panics(a.t, f, msgAndArgs...)
} }
// PanicsWithError asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// a.PanicsWithError("crazy error", func(){ GoCrazy() })
func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
PanicsWithError(a.t, errString, f, msgAndArgs...)
}
// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
PanicsWithErrorf(a.t, errString, f, msg, args...)
}
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value. // the recovered panic value equals the expected panic value.
// //
@ -1007,7 +1147,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
// Regexpf asserts that a specified regexp matches a string. // Regexpf asserts that a specified regexp matches a string.
// //
// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") // a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -1104,6 +1244,22 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
WithinDurationf(a.t, expected, actual, delta, msg, args...) WithinDurationf(a.t, expected, actual, delta, msg, args...)
} }
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
YAMLEq(a.t, expected, actual, msgAndArgs...)
}
// YAMLEqf asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
YAMLEqf(a.t, expected, actual, msg, args...)
}
// Zero asserts that i is the zero value for its type. // Zero asserts that i is the zero value for its type.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
@ -26,4 +26,4 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{})
// for table driven tests. // for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) type ErrorAssertionFunc func(TestingT, error, ...interface{})
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs"
vendor/go.opencensus.io/go.sum generated vendored
@ -67,6 +67,7 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -1694,6 +1694,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
if len(data) > 0 { if len(data) > 0 {
wrote, err := st.body.Write(data) wrote, err := st.body.Write(data)
if err != nil { if err != nil {
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return streamError(id, ErrCodeStreamClosed) return streamError(id, ErrCodeStreamClosed)
} }
if wrote != len(data) { if wrote != len(data) {
@ -2020,7 +2021,11 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
} }
if bodyOpen { if bodyOpen {
if vv, ok := rp.header["Content-Length"]; ok { if vv, ok := rp.header["Content-Length"]; ok {
if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
req.ContentLength = int64(cl)
} else {
req.ContentLength = 0
}
} else { } else {
req.ContentLength = -1 req.ContentLength = -1
} }
@ -2403,9 +2408,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var ctype, clen string var ctype, clen string
if clen = rws.snapHeader.Get("Content-Length"); clen != "" { if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
rws.snapHeader.Del("Content-Length") rws.snapHeader.Del("Content-Length")
if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
rws.sentContentLen = int64(cl)
} else { } else {
clen = "" clen = ""
} }
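The ParseInt-to-ParseUint changes above use a bit size of 63 so that negative and overflowing Content-Length values are rejected in one step rather than silently accepted; a standalone sketch of that behavior:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    for _, s := range []string{"1234", "-5", "9223372036854775808"} {
        // bitSize 63 keeps the parsed value representable as a non-negative
        // int64, so "-5" and 2^63 both return an error instead of wrapping.
        if cl, err := strconv.ParseUint(s, 10, 63); err == nil {
            fmt.Println(s, "->", int64(cl))
        } else {
            fmt.Println(s, "rejected:", err)
        }
    }
}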
@ -154,12 +154,21 @@ func (t *Transport) pingTimeout() time.Duration {
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled. // It returns an error if t1 has already been HTTP/2-enabled.
//
// Use ConfigureTransports instead to configure the HTTP/2 Transport.
func ConfigureTransport(t1 *http.Transport) error { func ConfigureTransport(t1 *http.Transport) error {
_, err := ConfigureTransports(t1)
return err return err
} }
// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns a new HTTP/2 Transport for further configuration.
// It returns an error if t1 has already been HTTP/2-enabled.
func ConfigureTransports(t1 *http.Transport) (*Transport, error) {
return configureTransports(t1)
}
func configureTransports(t1 *http.Transport) (*Transport, error) {
connPool := new(clientConnPool) connPool := new(clientConnPool)
t2 := &Transport{ t2 := &Transport{
ConnPool: noDialClientConnPool{connPool}, ConnPool: noDialClientConnPool{connPool},
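A hedged usage sketch of the newly exported ConfigureTransports, which returns the *http2.Transport so the caller can keep tuning it; the timeout values here are arbitrary:

package main

import (
    "log"
    "net/http"
    "time"

    "golang.org/x/net/http2"
)

func main() {
    t1 := &http.Transport{TLSHandshakeTimeout: 10 * time.Second}

    // Wires HTTP/2 into t1 and, unlike ConfigureTransport, hands back the
    // *http2.Transport so it can be tuned further.
    t2, err := http2.ConfigureTransports(t1)
    if err != nil {
        log.Fatal(err)
    }
    // Enables the transport's ping-based health check on idle connections.
    t2.ReadIdleTimeout = 30 * time.Second

    client := &http.Client{Transport: t1}
    _ = client
}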
@ -689,6 +698,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.inflow.add(transportDefaultConnFlow + initialWindowSize) cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
cc.bw.Flush() cc.bw.Flush()
if cc.werr != nil { if cc.werr != nil {
cc.Close()
return nil, cc.werr return nil, cc.werr
} }
@ -1080,6 +1090,15 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
bodyWriter := cc.t.getBodyWriterState(cs, body) bodyWriter := cc.t.getBodyWriterState(cs, body)
cs.on100 = bodyWriter.on100 cs.on100 = bodyWriter.on100
defer func() {
cc.wmu.Lock()
werr := cc.werr
cc.wmu.Unlock()
if werr != nil {
cc.Close()
}
}()
cc.wmu.Lock() cc.wmu.Lock()
endStream := !hasBody && !hasTrailers endStream := !hasBody && !hasTrailers
werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
@ -1129,6 +1148,9 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
// we can keep it. // we can keep it.
bodyWriter.cancel() bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite) cs.abortRequestBodyWrite(errStopReqBodyWrite)
if hasBody && !bodyWritten {
<-bodyWriter.resc
}
} }
if re.err != nil { if re.err != nil {
cc.forgetStreamID(cs.ID) cc.forgetStreamID(cs.ID)
@ -1149,6 +1171,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
} else { } else {
bodyWriter.cancel() bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
<-bodyWriter.resc
} }
cc.forgetStreamID(cs.ID) cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), errTimeout return nil, cs.getStartedWrite(), errTimeout
@ -1158,6 +1181,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
} else { } else {
bodyWriter.cancel() bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
<-bodyWriter.resc
} }
cc.forgetStreamID(cs.ID) cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), ctx.Err() return nil, cs.getStartedWrite(), ctx.Err()
@ -1167,6 +1191,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
} else { } else {
bodyWriter.cancel() bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
<-bodyWriter.resc
} }
cc.forgetStreamID(cs.ID) cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), errRequestCanceled return nil, cs.getStartedWrite(), errRequestCanceled
@ -1176,6 +1201,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
// forgetStreamID. // forgetStreamID.
return nil, cs.getStartedWrite(), cs.resetErr return nil, cs.getStartedWrite(), cs.resetErr
case err := <-bodyWriter.resc: case err := <-bodyWriter.resc:
bodyWritten = true
// Prefer the read loop's response, if available. Issue 16102. // Prefer the read loop's response, if available. Issue 16102.
select { select {
case re := <-readLoopResCh: case re := <-readLoopResCh:
@ -1186,7 +1212,6 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
cc.forgetStreamID(cs.ID) cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), err return nil, cs.getStartedWrite(), err
} }
bodyWritten = true
if d := cc.responseHeaderTimeout(); d != 0 { if d := cc.responseHeaderTimeout(); d != 0 {
timer := time.NewTimer(d) timer := time.NewTimer(d)
defer timer.Stop() defer timer.Stop()
@ -2006,8 +2031,8 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
if !streamEnded || isHead { if !streamEnded || isHead {
res.ContentLength = -1 res.ContentLength = -1
if clens := res.Header["Content-Length"]; len(clens) == 1 { if clens := res.Header["Content-Length"]; len(clens) == 1 {
if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
res.ContentLength = int64(cl)
} else { } else {
// TODO: care? unlike http/1, it won't mess up our framing, so it's // TODO: care? unlike http/1, it won't mess up our framing, so it's
// more safe smuggling-wise to ignore. // more safe smuggling-wise to ignore.
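The switch from ParseInt to ParseUint with a bit size of 63 keeps Content-Length inside the non-negative half of int64: a negative or absurdly large header value now fails to parse and the response simply keeps ContentLength == -1. A small illustration of the difference, using only the standard strconv package and made-up header values:

```Go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, clen := range []string{"1024", "-5", "9223372036854775808"} {
		// bitSize 63 rejects anything that would not fit in a non-negative int64.
		if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
			fmt.Printf("%q -> ContentLength %d\n", clen, int64(cl))
		} else {
			fmt.Printf("%q -> left at -1 (%v)\n", clen, err)
		}
	}
}
```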
@ -2525,6 +2550,7 @@ func strSliceContains(ss []string, s string) bool {
type erringRoundTripper struct{ err error } type erringRoundTripper struct{ err error }
func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
// gzipReader wraps a response body so it can lazily // gzipReader wraps a response body so it can lazily

View File

@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// +build go1.14 // +build go1.14,!go1.16
package idna package idna

4839
vendor/golang.org/x/net/idna/tables13.0.0.go generated vendored Normal file

File diff suppressed because it is too large

13
vendor/golang.org/x/oauth2/README.md generated vendored
View File

@ -16,15 +16,16 @@ Or you can manually git clone the repository to
See godoc for further documentation and examples. See godoc for further documentation and examples.
* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) * [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2)
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) * [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google)
## Policy for new packages ## Policy for new packages
We no longer accept new provider-specific packages in this repo. For We no longer accept new provider-specific packages in this repo if all
defining provider endpoints and provider-specific OAuth2 behavior, we they do is add a single endpoint variable. If you just want to add a
encourage you to create packages elsewhere. We'll keep the existing single endpoint, add it to the
packages for compatibility. [godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints)
package.
## Report Issues / Send Patches ## Report Issues / Send Patches

View File

@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// +build go1.14 // +build go1.14,!go1.16
package bidi package bidi

1955
vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go generated vendored Normal file

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// +build go1.14 // +build go1.14,!go1.16
package norm package norm

7760
vendor/golang.org/x/text/unicode/norm/tables13.0.0.go generated vendored Normal file

File diff suppressed because it is too large

View File

@ -53,10 +53,9 @@ func Every(interval time.Duration) Limit {
// //
// The methods AllowN, ReserveN, and WaitN consume n tokens. // The methods AllowN, ReserveN, and WaitN consume n tokens.
type Limiter struct { type Limiter struct {
mu sync.Mutex
limit Limit limit Limit
burst int burst int
mu sync.Mutex
tokens float64 tokens float64
// last is the last time the limiter's tokens field was updated // last is the last time the limiter's tokens field was updated
last time.Time last time.Time
@ -76,6 +75,8 @@ func (lim *Limiter) Limit() Limit {
// Burst values allow more events to happen at once. // Burst values allow more events to happen at once.
// A zero Burst allows no events, unless limit == Inf. // A zero Burst allows no events, unless limit == Inf.
func (lim *Limiter) Burst() int { func (lim *Limiter) Burst() int {
lim.mu.Lock()
defer lim.mu.Unlock()
return lim.burst return lim.burst
} }
@ -196,7 +197,7 @@ func (lim *Limiter) Reserve() *Reservation {
// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. // ReserveN returns a Reservation that indicates how long the caller must wait before n events happen.
// The Limiter takes this Reservation into account when allowing future events. // The Limiter takes this Reservation into account when allowing future events.
// ReserveN returns false if n exceeds the Limiter's burst size. // The returned Reservation's OK() method returns false if n exceeds the Limiter's burst size.
// Usage example: // Usage example:
// r := lim.ReserveN(time.Now(), 1) // r := lim.ReserveN(time.Now(), 1)
// if !r.OK() { // if !r.OK() {
@ -229,7 +230,7 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
lim.mu.Unlock() lim.mu.Unlock()
if n > burst && limit != Inf { if n > burst && limit != Inf {
return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst)
} }
// Check if ctx is already cancelled // Check if ctx is already cancelled
select { select {
@ -359,6 +360,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio
// advance calculates and returns an updated state for lim resulting from the passage of time. // advance calculates and returns an updated state for lim resulting from the passage of time.
// lim is not changed. // lim is not changed.
// advance requires that lim.mu is held.
func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
last := lim.last last := lim.last
if now.Before(last) { if now.Before(last) {
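The changes above take lim.mu in Burst() and report the locally captured burst in WaitN's error, so both reads stay consistent while the limiter is being updated concurrently. For reference, a short sketch of how the limiter is typically driven; this is a standalone example, not code from this repository:

```Go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Up to 5 events per second, with bursts of at most 2.
	lim := rate.NewLimiter(rate.Limit(5), 2)

	// Blocking style: WaitN sleeps until n tokens are available (or ctx ends).
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.WaitN(ctx, 1); err != nil {
		fmt.Println("wait failed:", err)
		return
	}

	// Reservation style: OK() reports false when n exceeds the burst size.
	r := lim.ReserveN(time.Now(), 3)
	if !r.OK() {
		fmt.Println("cannot reserve 3 events: burst is", lim.Burst())
		return
	}
	time.Sleep(r.Delay())
	fmt.Println("reserved and ready")
}
```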

16
vendor/gopkg.in/yaml.v3/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,16 @@
language: go
go:
- "1.4.x"
- "1.5.x"
- "1.6.x"
- "1.7.x"
- "1.8.x"
- "1.9.x"
- "1.10.x"
- "1.11.x"
- "1.12.x"
- "1.13.x"
- "tip"
go_import_path: gopkg.in/yaml.v3

50
vendor/gopkg.in/yaml.v3/LICENSE generated vendored Normal file
View File

@ -0,0 +1,50 @@
This project is covered by two different licenses: MIT and Apache.
#### MIT License ####
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original MIT license, with the additional
copyright starting in 2011 when the project was ported over:
apic.go emitterc.go parserc.go readerc.go scannerc.go
writerc.go yamlh.go yamlprivateh.go
Copyright (c) 2006-2010 Kirill Simonov
Copyright (c) 2006-2011 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
### Apache License ###
All the remaining project files are covered by the Apache license:
Copyright (c) 2011-2019 Canonical Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

13
vendor/gopkg.in/yaml.v3/NOTICE generated vendored Normal file
View File

@ -0,0 +1,13 @@
Copyright 2011-2016 Canonical Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

150
vendor/gopkg.in/yaml.v3/README.md generated vendored Normal file
View File

@ -0,0 +1,150 @@
# YAML support for the Go language
Introduction
------------
The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package supports most of YAML 1.2, but preserves some behavior
from 1.1 for backwards compatibility.
Specifically, as of v3 of the yaml package:
- YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
decoded into a typed bool value. Otherwise they behave as a string. Booleans
in YAML 1.2 are _true/false_ only.
- Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
as specified in YAML 1.2, because most parsers still use the old format.
Octals in the _0o777_ format are supported though, so new files work.
- Does not support base-60 floats. These are gone from YAML 1.2, and were
actually never supported by this package as it's clearly a poor choice.
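The first bullet is easiest to see in code: the same document decodes differently depending on whether the target is a typed bool or an untyped value. A small sketch of that behavior, assuming the package as imported below:

```Go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	doc := []byte("enabled: on")

	// Decoded into a typed bool, the YAML 1.1 literal "on" becomes true.
	var typed struct{ Enabled bool }
	if err := yaml.Unmarshal(doc, &typed); err != nil {
		panic(err)
	}
	fmt.Println(typed.Enabled) // true

	// Decoded into an untyped map, the same scalar stays a plain string.
	var loose map[string]interface{}
	if err := yaml.Unmarshal(doc, &loose); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", loose["enabled"], loose["enabled"]) // string on
}
```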
Installation and usage
----------------------
The import path for the package is *gopkg.in/yaml.v3*.
To install it, run:
go get gopkg.in/yaml.v3
API documentation
-----------------
If opened in a browser, the import path itself leads to the API documentation:
- [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
API stability
-------------
The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
License
-------
The yaml package is licensed under the MIT and Apache License 2.0 licenses.
Please see the LICENSE file for details.
Example
-------
```Go
package main
import (
"fmt"
"log"
"gopkg.in/yaml.v3"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```

746
vendor/gopkg.in/yaml.v3/apic.go generated vendored Normal file
View File

@ -0,0 +1,746 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"io"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_reader.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_reader_read_handler
parser.input_reader = r
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool,
) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
}
// Create ALIAS.
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
*event = yaml_event_t{
typ: yaml_ALIAS_EVENT,
anchor: anchor,
}
return true
}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//

931
vendor/gopkg.in/yaml.v3/decode.go generated vendored Normal file
View File

@ -0,0 +1,931 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"encoding"
"encoding/base64"
"fmt"
"io"
"math"
"reflect"
"strconv"
"time"
)
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *Node
anchors map[string]*Node
doneInit bool
}
func newParser(b []byte) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
return &p
}
func newParserFromReader(r io.Reader) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
yaml_parser_set_input_reader(&p.parser, r)
return &p
}
func (p *parser) init() {
if p.doneInit {
return
}
p.anchors = make(map[string]*Node)
p.expect(yaml_STREAM_START_EVENT)
p.doneInit = true
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
}
yaml_parser_delete(&p.parser)
}
// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
if p.event.typ == yaml_NO_EVENT {
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
if p.event.typ != e {
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
p.fail()
}
yaml_event_delete(&p.event)
p.event.typ = yaml_NO_EVENT
}
// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
if p.event.typ != yaml_NO_EVENT {
return p.event.typ
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
return p.event.typ
}
func (p *parser) fail() {
var where string
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
// Scanner errors don't iterate line before returning error
if p.parser.error == yaml_SCANNER_ERROR {
line++
}
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
if line != 0 {
where = "line " + strconv.Itoa(line) + ": "
}
var msg string
if len(p.parser.problem) > 0 {
msg = p.parser.problem
} else {
msg = "unknown problem parsing YAML content"
}
failf("%s%s", where, msg)
}
func (p *parser) anchor(n *Node, anchor []byte) {
if anchor != nil {
n.Anchor = string(anchor)
p.anchors[n.Anchor] = n
}
}
func (p *parser) parse() *Node {
p.init()
switch p.peek() {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
return p.alias()
case yaml_MAPPING_START_EVENT:
return p.mapping()
case yaml_SEQUENCE_START_EVENT:
return p.sequence()
case yaml_DOCUMENT_START_EVENT:
return p.document()
case yaml_STREAM_END_EVENT:
// Happens when attempting to decode an empty buffer.
return nil
case yaml_TAIL_COMMENT_EVENT:
panic("internal error: unexpected tail comment event (please report)")
default:
panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
}
}
func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
var style Style
if tag != "" && tag != "!" {
tag = shortTag(tag)
style = TaggedStyle
} else if defaultTag != "" {
tag = defaultTag
} else if kind == ScalarNode {
tag, _ = resolve("", value)
}
return &Node{
Kind: kind,
Tag: tag,
Value: value,
Style: style,
Line: p.event.start_mark.line + 1,
Column: p.event.start_mark.column + 1,
HeadComment: string(p.event.head_comment),
LineComment: string(p.event.line_comment),
FootComment: string(p.event.foot_comment),
}
}
func (p *parser) parseChild(parent *Node) *Node {
child := p.parse()
parent.Content = append(parent.Content, child)
return child
}
func (p *parser) document() *Node {
n := p.node(DocumentNode, "", "", "")
p.doc = n
p.expect(yaml_DOCUMENT_START_EVENT)
p.parseChild(n)
if p.peek() == yaml_DOCUMENT_END_EVENT {
n.FootComment = string(p.event.foot_comment)
}
p.expect(yaml_DOCUMENT_END_EVENT)
return n
}
func (p *parser) alias() *Node {
n := p.node(AliasNode, "", "", string(p.event.anchor))
n.Alias = p.anchors[n.Value]
if n.Alias == nil {
failf("unknown anchor '%s' referenced", n.Value)
}
p.expect(yaml_ALIAS_EVENT)
return n
}
func (p *parser) scalar() *Node {
var parsedStyle = p.event.scalar_style()
var nodeStyle Style
switch {
case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
nodeStyle = DoubleQuotedStyle
case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
nodeStyle = SingleQuotedStyle
case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
nodeStyle = LiteralStyle
case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
nodeStyle = FoldedStyle
}
var nodeValue = string(p.event.value)
var nodeTag = string(p.event.tag)
var defaultTag string
if nodeStyle == 0 {
if nodeValue == "<<" {
defaultTag = mergeTag
}
} else {
defaultTag = strTag
}
n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
n.Style |= nodeStyle
p.anchor(n, p.event.anchor)
p.expect(yaml_SCALAR_EVENT)
return n
}
func (p *parser) sequence() *Node {
n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
n.Style |= FlowStyle
}
p.anchor(n, p.event.anchor)
p.expect(yaml_SEQUENCE_START_EVENT)
for p.peek() != yaml_SEQUENCE_END_EVENT {
p.parseChild(n)
}
n.LineComment = string(p.event.line_comment)
n.FootComment = string(p.event.foot_comment)
p.expect(yaml_SEQUENCE_END_EVENT)
return n
}
func (p *parser) mapping() *Node {
n := p.node(MappingNode, mapTag, string(p.event.tag), "")
block := true
if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
block = false
n.Style |= FlowStyle
}
p.anchor(n, p.event.anchor)
p.expect(yaml_MAPPING_START_EVENT)
for p.peek() != yaml_MAPPING_END_EVENT {
k := p.parseChild(n)
if block && k.FootComment != "" {
// Must be a foot comment for the prior value when being dedented.
if len(n.Content) > 2 {
n.Content[len(n.Content)-3].FootComment = k.FootComment
k.FootComment = ""
}
}
v := p.parseChild(n)
if k.FootComment == "" && v.FootComment != "" {
k.FootComment = v.FootComment
v.FootComment = ""
}
if p.peek() == yaml_TAIL_COMMENT_EVENT {
if k.FootComment == "" {
k.FootComment = string(p.event.foot_comment)
}
p.expect(yaml_TAIL_COMMENT_EVENT)
}
}
n.LineComment = string(p.event.line_comment)
n.FootComment = string(p.event.foot_comment)
if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
n.Content[len(n.Content)-2].FootComment = n.FootComment
n.FootComment = ""
}
p.expect(yaml_MAPPING_END_EVENT)
return n
}
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
type decoder struct {
doc *Node
aliases map[*Node]bool
terrors []string
stringMapType reflect.Type
generalMapType reflect.Type
knownFields bool
uniqueKeys bool
decodeCount int
aliasCount int
aliasDepth int
}
var (
nodeType = reflect.TypeOf(Node{})
durationType = reflect.TypeOf(time.Duration(0))
stringMapType = reflect.TypeOf(map[string]interface{}{})
generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = generalMapType.Elem()
timeType = reflect.TypeOf(time.Time{})
ptrTimeType = reflect.TypeOf(&time.Time{})
)
func newDecoder() *decoder {
d := &decoder{
stringMapType: stringMapType,
generalMapType: generalMapType,
uniqueKeys: true,
}
d.aliases = make(map[*Node]bool)
return d
}
func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
if n.Tag != "" {
tag = n.Tag
}
value := n.Value
if tag != seqTag && tag != mapTag {
if len(value) > 10 {
value = " `" + value[:7] + "...`"
} else {
value = " `" + value + "`"
}
}
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
}
func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
err := u.UnmarshalYAML(n)
if e, ok := err.(*TypeError); ok {
d.terrors = append(d.terrors, e.Errors...)
return false
}
if err != nil {
fail(err)
}
return true
}
func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
terrlen := len(d.terrors)
err := u.UnmarshalYAML(func(v interface{}) (err error) {
defer handleErr(&err)
d.unmarshal(n, reflect.ValueOf(v))
if len(d.terrors) > terrlen {
issues := d.terrors[terrlen:]
d.terrors = d.terrors[:terrlen]
return &TypeError{issues}
}
return nil
})
if e, ok := err.(*TypeError); ok {
d.terrors = append(d.terrors, e.Errors...)
return false
}
if err != nil {
fail(err)
}
return true
}
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.ShortTag() == nullTag {
return out, false, false
}
again := true
for again {
again = false
if out.Kind() == reflect.Ptr {
if out.IsNil() {
out.Set(reflect.New(out.Type().Elem()))
}
out = out.Elem()
again = true
}
if out.CanAddr() {
outi := out.Addr().Interface()
if u, ok := outi.(Unmarshaler); ok {
good = d.callUnmarshaler(n, u)
return out, true, good
}
if u, ok := outi.(obsoleteUnmarshaler); ok {
good = d.callObsoleteUnmarshaler(n, u)
return out, true, good
}
}
}
return out, false, false
}
func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
if n.ShortTag() == nullTag {
return reflect.Value{}
}
for _, num := range index {
for {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
continue
}
break
}
v = v.Field(num)
}
return v
}
const (
// 400,000 decode operations is ~500kb of dense object declarations, or
// ~5kb of dense object declarations with 10000% alias expansion
alias_ratio_range_low = 400000
// 4,000,000 decode operations is ~5MB of dense object declarations, or
// ~4.5MB of dense object declarations with 10% alias expansion
alias_ratio_range_high = 4000000
// alias_ratio_range is the range over which we scale allowed alias ratios
alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
)
func allowedAliasRatio(decodeCount int) float64 {
switch {
case decodeCount <= alias_ratio_range_low:
// allow 99% to come from alias expansion for small-to-medium documents
return 0.99
case decodeCount >= alias_ratio_range_high:
// allow 10% to come from alias expansion for very large documents
return 0.10
default:
// scale smoothly from 99% down to 10% over the range.
// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
}
}
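// As a rough illustration of the formula above: a document needing
// decodeCount = 2,200,000 operations sits halfway through the range, so the
// allowed ratio is 0.99 - 0.89*((2200000-400000)/3600000) = 0.99 - 0.445 = 0.545,
// i.e. a little over half of those operations may still come from alias
// expansion before unmarshal fails with "document contains excessive aliasing".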
func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
d.decodeCount++
if d.aliasDepth > 0 {
d.aliasCount++
}
if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
failf("document contains excessive aliasing")
}
if out.Type() == nodeType {
out.Set(reflect.ValueOf(n).Elem())
return true
}
switch n.Kind {
case DocumentNode:
return d.document(n, out)
case AliasNode:
return d.alias(n, out)
}
out, unmarshaled, good := d.prepare(n, out)
if unmarshaled {
return good
}
switch n.Kind {
case ScalarNode:
good = d.scalar(n, out)
case MappingNode:
good = d.mapping(n, out)
case SequenceNode:
good = d.sequence(n, out)
default:
panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind)))
}
return good
}
func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
if len(n.Content) == 1 {
d.doc = n
d.unmarshal(n.Content[0], out)
return true
}
return false
}
func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
if d.aliases[n] {
// TODO this could actually be allowed in some circumstances.
failf("anchor '%s' value contains itself", n.Value)
}
d.aliases[n] = true
d.aliasDepth++
good = d.unmarshal(n.Alias, out)
d.aliasDepth--
delete(d.aliases, n)
return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
func (d *decoder) scalar(n *Node, out reflect.Value) bool {
var tag string
var resolved interface{}
if n.indicatedString() {
tag = strTag
resolved = n.Value
} else {
tag, resolved = resolve(n.Tag, n.Value)
if tag == binaryTag {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
failf("!!binary value contains invalid base64 data")
}
resolved = string(data)
}
}
if resolved == nil {
if out.CanAddr() {
switch out.Kind() {
case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
out.Set(reflect.Zero(out.Type()))
return true
}
}
return false
}
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
// We've resolved to exactly the type we want, so use that.
out.Set(resolvedv)
return true
}
// Perhaps we can use the value as a TextUnmarshaler to
// set its value.
if out.CanAddr() {
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
if ok {
var text []byte
if tag == binaryTag {
text = []byte(resolved.(string))
} else {
// We let any value be unmarshaled into TextUnmarshaler.
// That might be more lax than we'd like, but the
// TextUnmarshaler itself should bowl out any dubious values.
text = []byte(n.Value)
}
err := u.UnmarshalText(text)
if err != nil {
fail(err)
}
return true
}
}
switch out.Kind() {
case reflect.String:
if tag == binaryTag {
out.SetString(resolved.(string))
return true
}
out.SetString(n.Value)
return true
case reflect.Interface:
out.Set(reflect.ValueOf(resolved))
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
// This used to work in v2, but it's very unfriendly.
isDuration := out.Type() == durationType
switch resolved := resolved.(type) {
case int:
if !isDuration && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case int64:
if !isDuration && !out.OverflowInt(resolved) {
out.SetInt(resolved)
return true
}
case uint64:
if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case float64:
if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
return true
}
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch resolved := resolved.(type) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
return true
case string:
// This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
// It only works if explicitly attempting to unmarshal into a typed bool value.
switch resolved {
case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
out.SetBool(true)
return true
case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
out.SetBool(false)
return true
}
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
return true
case int64:
out.SetFloat(float64(resolved))
return true
case uint64:
out.SetFloat(float64(resolved))
return true
case float64:
out.SetFloat(resolved)
return true
}
case reflect.Struct:
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
out.Set(resolvedv)
return true
}
case reflect.Ptr:
panic("yaml internal error: please report the issue")
}
d.terror(n, tag, out)
return false
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
l := len(n.Content)
var iface reflect.Value
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Array:
if l != out.Len() {
failf("invalid array: want %d elements but got %d", out.Len(), l)
}
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
out = settableValueOf(make([]interface{}, l))
default:
d.terror(n, seqTag, out)
return false
}
et := out.Type().Elem()
j := 0
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.Content[i], e); ok {
out.Index(j).Set(e)
j++
}
}
if out.Kind() != reflect.Array {
out.Set(out.Slice(0, j))
}
if iface.IsValid() {
iface.Set(out)
}
return true
}
func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
l := len(n.Content)
if d.uniqueKeys {
nerrs := len(d.terrors)
for i := 0; i < l; i += 2 {
ni := n.Content[i]
for j := i + 2; j < l; j += 2 {
nj := n.Content[j]
if ni.Kind == nj.Kind && ni.Value == nj.Value {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
}
}
}
if len(d.terrors) > nerrs {
return false
}
}
switch out.Kind() {
case reflect.Struct:
return d.mappingStruct(n, out)
case reflect.Map:
// okay
case reflect.Interface:
iface := out
if isStringMap(n) {
out = reflect.MakeMap(d.stringMapType)
} else {
out = reflect.MakeMap(d.generalMapType)
}
iface.Set(out)
default:
d.terror(n, mapTag, out)
return false
}
outt := out.Type()
kt := outt.Key()
et := outt.Elem()
stringMapType := d.stringMapType
generalMapType := d.generalMapType
if outt.Elem() == ifaceType {
if outt.Key().Kind() == reflect.String {
d.stringMapType = outt
} else if outt.Key() == ifaceType {
d.generalMapType = outt
}
}
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
}
for i := 0; i < l; i += 2 {
if isMerge(n.Content[i]) {
d.merge(n.Content[i+1], out)
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.Content[i], k) {
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
}
if kkind == reflect.Map || kkind == reflect.Slice {
failf("invalid map key: %#v", k.Interface())
}
e := reflect.New(et).Elem()
if d.unmarshal(n.Content[i+1], e) {
out.SetMapIndex(k, e)
}
}
}
d.stringMapType = stringMapType
d.generalMapType = generalMapType
return true
}
func isStringMap(n *Node) bool {
if n.Kind != MappingNode {
return false
}
l := len(n.Content)
for i := 0; i < l; i += 2 {
if n.Content[i].ShortTag() != strTag {
return false
}
}
return true
}
func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
var inlineMap reflect.Value
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
for _, index := range sinfo.InlineUnmarshalers {
field := d.fieldByIndex(n, out, index)
d.prepare(n, field)
}
var doneFields []bool
if d.uniqueKeys {
doneFields = make([]bool, len(sinfo.FieldsList))
}
name := settableValueOf("")
l := len(n.Content)
for i := 0; i < l; i += 2 {
ni := n.Content[i]
if isMerge(ni) {
d.merge(n.Content[i+1], out)
continue
}
if !d.unmarshal(ni, name) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
if d.uniqueKeys {
if doneFields[info.Id] {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
continue
}
doneFields[info.Id] = true
}
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
} else {
field = d.fieldByIndex(n, out, info.Inline)
}
d.unmarshal(n.Content[i+1], field)
} else if sinfo.InlineMap != -1 {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.Content[i+1], value)
inlineMap.SetMapIndex(name, value)
} else if d.knownFields {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
}
}
return true
}
func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
func (d *decoder) merge(n *Node, out reflect.Value) {
switch n.Kind {
case MappingNode:
d.unmarshal(n, out)
case AliasNode:
if n.Alias != nil && n.Alias.Kind != MappingNode {
failWantMap()
}
d.unmarshal(n, out)
case SequenceNode:
// Step backwards as earlier nodes take precedence.
for i := len(n.Content) - 1; i >= 0; i-- {
ni := n.Content[i]
if ni.Kind == AliasNode {
if ni.Alias != nil && ni.Alias.Kind != MappingNode {
failWantMap()
}
} else if ni.Kind != MappingNode {
failWantMap()
}
d.unmarshal(ni, out)
}
default:
failWantMap()
}
}
func isMerge(n *Node) bool {
return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
}
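The merge logic above is what makes YAML's `<<` key work when decoding: mappings reached through an alias are folded into the surrounding map, with explicitly written keys taking precedence. A short sketch of the behavior; this is a standalone example, assuming the import path used in this vendor tree:

```Go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

var doc = []byte(`
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  host: dev.internal
`)

func main() {
	var cfg map[string]map[string]string
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	// Keys from the aliased mapping are merged in; explicit keys win.
	fmt.Println(cfg["development"]["adapter"]) // postgres
	fmt.Println(cfg["development"]["host"])    // dev.internal
}
```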

1992
vendor/gopkg.in/yaml.v3/emitterc.go generated vendored Normal file

File diff suppressed because it is too large

561
vendor/gopkg.in/yaml.v3/encode.go generated vendored Normal file
View File

@ -0,0 +1,561 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"encoding"
"fmt"
"io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
indent int
doneInit bool
}
func newEncoder() *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func newEncoderWithWriter(w io.Writer) *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, w)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func (e *encoder) init() {
if e.doneInit {
return
}
if e.indent == 0 {
e.indent = 4
}
e.emitter.best_indent = e.indent
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
e.doneInit = true
}
func (e *encoder) finish() {
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
e.init()
var node *Node
if in.IsValid() {
node, _ = in.Interface().(*Node)
}
if node != nil && node.Kind == DocumentNode {
e.nodev(in)
} else {
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
e.marshal(tag, in)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
tag = shortTag(tag)
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
e.nilv()
return
}
iface := in.Interface()
switch value := iface.(type) {
case *Node:
e.nodev(in)
return
case time.Time:
e.timev(tag, in)
return
case *time.Time:
e.timev(tag, in.Elem())
return
case time.Duration:
e.stringv(tag, reflect.ValueOf(value.String()))
return
case Marshaler:
v, err := value.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
e.marshal(tag, reflect.ValueOf(v))
return
case encoding.TextMarshaler:
text, err := value.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
case nil:
e.nilv()
return
}
switch in.Kind() {
case reflect.Interface:
e.marshal(tag, in.Elem())
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
e.marshal(tag, in.Elem())
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice, reflect.Array:
e.slicev(tag, in)
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
e.intv(tag, in)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
for _, num := range index {
for {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
continue
}
break
}
v = v.Field(num)
}
return v
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = e.fieldByIndex(in, info.Inline)
if !value.IsValid() {
continue
}
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but such values are still marshalled as quoted
// strings for the time being, for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
// isOldBool returns whether s is bool notation as defined in YAML 1.1.
//
// We continue to force strings that YAML 1.1 would interpret as booleans to be
// rendered as quoted strings so that the marshalled output remains valid for
// YAML 1.1 parsers.
func isOldBool(s string) (result bool) {
switch s {
case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
"n", "N", "no", "No", "NO", "off", "Off", "OFF":
return true
default:
return false
}
}
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
canUsePlain := true
switch {
case !utf8.ValidString(s):
if tag == binaryTag {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = binaryTag
s = encodeBase64(s)
case tag == "":
// Check to see if it would resolve to a specific
// tag when encoded unquoted. If it doesn't,
// there's no need to quote it.
rtag, _ := resolve("", s)
canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
}
// Note: it's possible for user code to emit invalid YAML
// if they explicitly specify a tag and a string containing
// text that's incompatible with that tag.
switch {
case strings.Contains(s, "\n"):
if e.flow {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else {
style = yaml_LITERAL_SCALAR_STYLE
}
case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
default:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) timev(tag string, in reflect.Value) {
t := in.Interface().(time.Time)
s := t.Format(time.RFC3339Nano)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// Issue #352: When formatting, use the precision of the underlying value
precision := 64
if in.Kind() == reflect.Float32 {
precision = 32
}
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
implicit := tag == ""
if !implicit {
tag = longTag(tag)
}
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.event.head_comment = head
e.event.line_comment = line
e.event.foot_comment = foot
e.event.tail_comment = tail
e.emit()
}
func (e *encoder) nodev(in reflect.Value) {
e.node(in.Interface().(*Node), "")
}
func (e *encoder) node(node *Node, tail string) {
// If the tag was not explicitly requested, and dropping it won't change the
// implicit tag of the value, don't include it in the presentation.
var tag = node.Tag
var stag = shortTag(tag)
var rtag string
var forceQuoting bool
if tag != "" && node.Style&TaggedStyle == 0 {
if node.Kind == ScalarNode {
if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
tag = ""
} else {
rtag, _ = resolve("", node.Value)
if rtag == stag {
tag = ""
} else if stag == strTag {
tag = ""
forceQuoting = true
}
}
} else {
switch node.Kind {
case MappingNode:
rtag = mapTag
case SequenceNode:
rtag = seqTag
}
if rtag == stag {
tag = ""
}
}
}
switch node.Kind {
case DocumentNode:
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.event.head_comment = []byte(node.HeadComment)
e.emit()
for _, node := range node.Content {
e.node(node, "")
}
yaml_document_end_event_initialize(&e.event, true)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case SequenceNode:
style := yaml_BLOCK_SEQUENCE_STYLE
if node.Style&FlowStyle != 0 {
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style))
e.event.head_comment = []byte(node.HeadComment)
e.emit()
for _, node := range node.Content {
e.node(node, "")
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case MappingNode:
style := yaml_BLOCK_MAPPING_STYLE
if node.Style&FlowStyle != 0 {
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)
e.event.tail_comment = []byte(tail)
e.event.head_comment = []byte(node.HeadComment)
e.emit()
// The tail logic below moves the foot comment of prior keys to the following key,
// since the value for each key may be a nested structure and the foot comment needs
// to be processed only after the entirety of the value is streamed. The last tail is
// processed with the mapping end event.
var tail string
for i := 0; i+1 < len(node.Content); i += 2 {
k := node.Content[i]
foot := k.FootComment
if foot != "" {
kopy := *k
kopy.FootComment = ""
k = &kopy
}
e.node(k, tail)
tail = foot
v := node.Content[i+1]
e.node(v, "")
}
yaml_mapping_end_event_initialize(&e.event)
e.event.tail_comment = []byte(tail)
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case AliasNode:
yaml_alias_event_initialize(&e.event, []byte(node.Value))
e.event.head_comment = []byte(node.HeadComment)
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case ScalarNode:
value := node.Value
if !utf8.ValidString(value) {
if tag == binaryTag {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = binaryTag
value = encodeBase64(value)
}
style := yaml_PLAIN_SCALAR_STYLE
switch {
case node.Style&DoubleQuotedStyle != 0:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
case node.Style&SingleQuotedStyle != 0:
style = yaml_SINGLE_QUOTED_SCALAR_STYLE
case node.Style&LiteralStyle != 0:
style = yaml_LITERAL_SCALAR_STYLE
case node.Style&FoldedStyle != 0:
style = yaml_FOLDED_SCALAR_STYLE
case strings.Contains(value, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
case forceQuoting:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
}
}
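As a quick illustration of the quoting rules implemented by stringv, isOldBool and isBase60Float above (a sketch against the vendored gopkg.in/yaml.v3, output approximate): strings that YAML 1.1 would read as booleans or base-60 floats are emitted quoted, and multi-line strings fall back to the literal style.

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    out, err := yaml.Marshal(map[string]string{
        "answer": "yes",                // a YAML 1.1 boolean spelling, so it is quoted
        "clock":  "12:30:00",           // looks like a YAML 1.1 base-60 float, so it is quoted
        "note":   "line one\nline two", // multi-line, so the literal style is used
    })
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
    // Approximate output:
    //   answer: "yes"
    //   clock: "12:30:00"
    //   note: |-
    //       line one
    //       line two
}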

5
vendor/gopkg.in/yaml.v3/go.mod generated vendored Normal file
View File

@ -0,0 +1,5 @@
module "gopkg.in/yaml.v3"
require (
"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)

1229
vendor/gopkg.in/yaml.v3/parserc.go generated vendored Normal file

File diff suppressed because it is too large

434
vendor/gopkg.in/yaml.v3/readerc.go generated vendored Normal file
View File

@ -0,0 +1,434 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"io"
)
// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we had enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// [Go] This function was changed to guarantee the requested length size at EOF.
// The fact we need to do this is pretty awful, but the description above implies
// that this should be the case, and there are tests that rely on it.
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
// [Go] ACTUALLY! Read the documentation of this function above.
// This is just broken. To return true, we need to have the
// given length in the buffer. Not doing that means every single
// check that calls this function to make sure the buffer has a
// given length is either (in Go) panicking or (in C) accessing invalid memory.
//return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
// [Go] Read the documentation of this function above. To return true,
// we need to have the given length in the buffer. Not doing that means
// every single check that calls this function to make sure the buffer
// has a given length is Go) panicking; or C) accessing invalid memory.
// This happens here due to the EOF above breaking early.
for buffer_len < length {
parser.buffer[buffer_len] = 0
buffer_len++
}
parser.buffer = parser.buffer[:buffer_len]
return true
}
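The reader above detects a byte order mark and transcodes UTF-16 input to UTF-8 before scanning, so UTF-16 documents can be handed straight to Unmarshal. A rough sketch of that behavior (illustrative only, assuming the vendored gopkg.in/yaml.v3):

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    // Encode "a: 1" as UTF-16LE with a BOM: each ASCII rune becomes its byte
    // followed by a zero byte.
    raw := []byte{0xff, 0xfe}
    for _, r := range "a: 1" {
        raw = append(raw, byte(r), 0x00)
    }
    var m map[string]int
    if err := yaml.Unmarshal(raw, &m); err != nil {
        panic(err)
    }
    fmt.Println(m["a"]) // 1
}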

326
vendor/gopkg.in/yaml.v3/resolve.go generated vendored Normal file
View File

@ -0,0 +1,326 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"encoding/base64"
"math"
"regexp"
"strconv"
"strings"
"time"
)
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, boolTag, []string{"true", "True", "TRUE"}},
{false, boolTag, []string{"false", "False", "FALSE"}},
{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", mergeTag, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const (
nullTag = "!!null"
boolTag = "!!bool"
strTag = "!!str"
intTag = "!!int"
floatTag = "!!float"
timestampTag = "!!timestamp"
seqTag = "!!seq"
mapTag = "!!map"
binaryTag = "!!binary"
mergeTag = "!!merge"
)
var longTags = make(map[string]string)
var shortTags = make(map[string]string)
func init() {
for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
ltag := longTag(stag)
longTags[stag] = ltag
shortTags[ltag] = stag
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
if strings.HasPrefix(tag, longTagPrefix) {
if stag, ok := shortTags[tag]; ok {
return stag
}
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
if ltag, ok := longTags[tag]; ok {
return ltag
}
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
return true
}
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
tag = shortTag(tag)
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, strTag, binaryTag:
return
case floatTag:
if rtag == intTag {
switch v := out.(type) {
case int64:
rtag = floatTag
out = float64(v)
return
case int:
rtag = floatTag
out = float64(v)
return
}
}
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != strTag && tag != binaryTag {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parsers, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return floatTag, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
// Only try values as a timestamp if the value is unquoted or there's an explicit
// !!timestamp tag.
if tag == "" || tag == timestampTag {
t, ok := parseTimestamp(in)
if ok {
return timestampTag, t
}
}
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain, 0, 64)
if err == nil {
return intTag, uintv
}
if yamlStyleFloat.MatchString(plain) {
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return floatTag, floatv
}
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
if err == nil {
return intTag, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
if err == nil {
if true || intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
}
// Octals as introduced in version 1.2 of the spec.
// Octals from the 1.1 spec, spelled as 0777, are still
// decoded by default in v3 as well for compatibility.
// May be dropped in v4 depending on how usage evolves.
if strings.HasPrefix(plain, "0o") {
intv, err := strconv.ParseInt(plain[2:], 8, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 8, 64)
if err == nil {
return intTag, uintv
}
} else if strings.HasPrefix(plain, "-0o") {
intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
if err == nil {
if true || intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
}
default:
panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
}
}
return strTag, in
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}
// This is a subset of the formats allowed by the regular expression
// defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
"2006-1-2 15:4:5.999999999", // space separated with no time zone
"2006-1-2", // date only
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
// from the set of examples.
}
// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
// TODO write code to check all the formats supported by
// http://yaml.org/type/timestamp.html instead of using time.Parse.
// Quick check: all date formats start with YYYY-.
i := 0
for ; i < len(s); i++ {
if c := s[i]; c < '0' || c > '9' {
break
}
}
if i != 4 || i == len(s) || s[i] != '-' {
return time.Time{}, false
}
for _, format := range allowedTimestampFormats {
if t, err := time.Parse(format, s); err == nil {
return t, true
}
}
return time.Time{}, false
}
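To make the resolution table above concrete, decoding plain scalars into an interface{} shows which Go types they resolve to: 0o octals, underscore-separated integers and .inf are recognized, while YAML 1.1 spellings such as yes stay plain strings. A sketch against the vendored gopkg.in/yaml.v3:

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    doc := []byte("perm: 0o17\ncount: 1_000\nbig: .inf\nnothing: null\nold: yes\n")
    var m map[string]interface{}
    if err := yaml.Unmarshal(doc, &m); err != nil {
        panic(err)
    }
    for _, k := range []string{"perm", "count", "big", "nothing", "old"} {
        fmt.Printf("%s: %T %v\n", k, m[k], m[k])
    }
    // perm: int 15
    // count: int 1000
    // big: float64 +Inf
    // nothing: <nil> <nil>
    // old: string yes
}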

3025
vendor/gopkg.in/yaml.v3/scannerc.go generated vendored Normal file

File diff suppressed because it is too large

134
vendor/gopkg.in/yaml.v3/sorter.go generated vendored Normal file
View File

@ -0,0 +1,134 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"reflect"
"unicode"
)
type keyList []reflect.Value
func (l keyList) Len() int { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
a := l[i]
b := l[j]
ak := a.Kind()
bk := b.Kind()
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
a = a.Elem()
ak = a.Kind()
}
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
b = b.Elem()
bk = b.Kind()
}
af, aok := keyFloat(a)
bf, bok := keyFloat(b)
if aok && bok {
if af != bf {
return af < bf
}
if ak != bk {
return ak < bk
}
return numLess(a, b)
}
if ak != reflect.String || bk != reflect.String {
return ak < bk
}
ar, br := []rune(a.String()), []rune(b.String())
digits := false
for i := 0; i < len(ar) && i < len(br); i++ {
if ar[i] == br[i] {
digits = unicode.IsDigit(ar[i])
continue
}
al := unicode.IsLetter(ar[i])
bl := unicode.IsLetter(br[i])
if al && bl {
return ar[i] < br[i]
}
if al || bl {
if digits {
return al
} else {
return bl
}
}
var ai, bi int
var an, bn int64
if ar[i] == '0' || br[i] == '0' {
for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
if ar[j] != '0' {
an = 1
bn = 1
break
}
}
}
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
bn = bn*10 + int64(br[bi]-'0')
}
if an != bn {
return an < bn
}
if ai != bi {
return ai < bi
}
return ar[i] < br[i]
}
return len(ar) < len(br)
}
// keyFloat returns a float value for v if it is a number/bool
// and whether it is a number/bool or not.
func keyFloat(v reflect.Value) (f float64, ok bool) {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int()), true
case reflect.Float32, reflect.Float64:
return v.Float(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return float64(v.Uint()), true
case reflect.Bool:
if v.Bool() {
return 1, true
}
return 0, true
}
return 0, false
}
// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return a.Int() < b.Int()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Bool:
return !a.Bool() && b.Bool()
}
panic("not a number")
}
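The sorter above is what gives Marshal a deterministic, digit-aware ordering for map keys, so item2 sorts before item10 rather than after it. A small sketch (vendored gopkg.in/yaml.v3 assumed, output approximate):

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    out, err := yaml.Marshal(map[string]int{"item10": 3, "item2": 2, "item1": 1})
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
    // item1: 1
    // item2: 2
    // item10: 3
}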

48
vendor/gopkg.in/yaml.v3/writerc.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
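The flush above is what finally hands the emitter's buffer to the destination io.Writer. From the public API that means output written through NewEncoder is guaranteed to reach the writer once Close is called, with subsequent documents separated by ---. An illustrative sketch (vendored gopkg.in/yaml.v3 assumed):

package main

import (
    "bytes"
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    var buf bytes.Buffer
    enc := yaml.NewEncoder(&buf)
    if err := enc.Encode(map[string]int{"a": 1}); err != nil {
        panic(err)
    }
    if err := enc.Encode(map[string]int{"b": 2}); err != nil {
        panic(err)
    }
    if err := enc.Close(); err != nil { // flushes any remaining buffered output
        panic(err)
    }
    fmt.Print(buf.String())
    // a: 1
    // ---
    // b: 2
}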

662
vendor/gopkg.in/yaml.v3/yaml.go generated vendored Normal file
View File

@ -0,0 +1,662 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
// https://github.com/go-yaml/yaml
//
package yaml
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
"unicode/utf8"
)
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document.
type Unmarshaler interface {
UnmarshalYAML(value *Node) error
}
type obsoleteUnmarshaler interface {
UnmarshalYAML(unmarshal func(interface{}) error) error
}
// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
MarshalYAML() (interface{}, error)
}
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
return unmarshal(in, out, false)
}
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
parser *parser
knownFields bool
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
parser: newParserFromReader(r),
}
}
// KnownFields ensures that the keys in decoded mappings
// exist as fields in the struct being decoded into.
func (dec *Decoder) KnownFields(enable bool) {
dec.knownFields = enable
}
// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
d := newDecoder()
d.knownFields = dec.knownFields
defer handleErr(&err)
node := dec.parser.parse()
if node == nil {
return io.EOF
}
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(node, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Decode decodes the node and stores its data into the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (n *Node) Decode(v interface{}) (err error) {
d := newDecoder()
defer handleErr(&err)
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(n, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder()
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Zero valued structs will be omitted if all their public
// fields are zero, unless they implement an IsZero
// method (see the IsZeroer interface type), in which
// case the field will be included if that method returns true.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshalDoc("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
// An Encoder writes YAML values to an output stream.
type Encoder struct {
encoder *encoder
}
// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
encoder: newEncoderWithWriter(w),
}
}
// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
defer handleErr(&err)
e.encoder.marshalDoc("", reflect.ValueOf(v))
return nil
}
// SetIndent changes the indentation used when encoding.
func (e *Encoder) SetIndent(spaces int) {
if spaces < 0 {
panic("yaml: cannot indent to a negative number of spaces")
}
e.encoder.indent = spaces
}
// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
defer handleErr(&err)
e.encoder.finish()
return nil
}
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
*err = e.err
} else {
panic(v)
}
}
}
type yamlError struct {
err error
}
func fail(err error) {
panic(yamlError{err})
}
func failf(format string, args ...interface{}) {
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
Errors []string
}
func (e *TypeError) Error() string {
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
type Kind uint32
const (
DocumentNode Kind = 1 << iota
SequenceNode
MappingNode
ScalarNode
AliasNode
)
type Style uint32
const (
TaggedStyle Style = 1 << iota
DoubleQuotedStyle
SingleQuotedStyle
LiteralStyle
FoldedStyle
FlowStyle
)
// Node represents an element in the YAML document hierarchy. While documents
// are typically encoded and decoded into higher level types, such as structs
// and maps, Node is an intermediate representation that allows detailed
// control over the content being decoded or encoded.
//
// Values that make use of the Node type interact with the yaml package in the
// same way any other type would do, by encoding and decoding yaml data
// directly or indirectly into them.
//
// For example:
//
// var person struct {
// Name string
// Address yaml.Node
// }
// err := yaml.Unmarshal(data, &person)
//
// Or by itself:
//
// var person Node
// err := yaml.Unmarshal(data, &person)
//
type Node struct {
// Kind defines whether the node is a document, a mapping, a sequence,
// a scalar value, or an alias to another node. The specific data type of
// scalar nodes may be obtained via the ShortTag and LongTag methods.
Kind Kind
// Style allows customizing the appearance of the node in the tree.
Style Style
// Tag holds the YAML tag defining the data type for the value.
// When decoding, this field will always be set to the resolved tag,
// even when it wasn't explicitly provided in the YAML content.
// When encoding, if this field is unset the value type will be
// implied from the node properties, and if it is set, it will only
// be serialized into the representation if TaggedStyle is used or
// the implicit tag diverges from the provided one.
Tag string
// Value holds the unescaped and unquoted representation of the value.
Value string
// Anchor holds the anchor name for this node, which allows aliases to point to it.
Anchor string
// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
Alias *Node
// Content holds contained nodes for documents, mappings, and sequences.
Content []*Node
// HeadComment holds any comments in the lines preceding the node and
// not separated by an empty line.
HeadComment string
// LineComment holds any comments at the end of the line the node is on.
LineComment string
// FootComment holds any comments following the node and before empty lines.
FootComment string
// Line and Column hold the node position in the decoded YAML text.
// These fields are not respected when encoding the node.
Line int
Column int
}
// LongTag returns the long form of the tag that indicates the data type for
// the node. If the Tag field isn't explicitly defined, one will be computed
// based on the node properties.
func (n *Node) LongTag() string {
return longTag(n.ShortTag())
}
// ShortTag returns the short form of the YAML tag that indicates the data type for
// the node. If the Tag field isn't explicitly defined, one will be computed
// based on the node properties.
func (n *Node) ShortTag() string {
if n.indicatedString() {
return strTag
}
if n.Tag == "" || n.Tag == "!" {
switch n.Kind {
case MappingNode:
return mapTag
case SequenceNode:
return seqTag
case AliasNode:
if n.Alias != nil {
return n.Alias.ShortTag()
}
case ScalarNode:
tag, _ := resolve("", n.Value)
return tag
}
return ""
}
return shortTag(n.Tag)
}
func (n *Node) indicatedString() bool {
return n.Kind == ScalarNode &&
(shortTag(n.Tag) == strTag ||
(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
}
// SetString is a convenience function that sets the node to a string value
// and defines its style in a pleasant way depending on its content.
func (n *Node) SetString(s string) {
n.Kind = ScalarNode
if utf8.ValidString(s) {
n.Value = s
n.Tag = strTag
} else {
n.Value = encodeBase64(s)
n.Tag = binaryTag
}
if strings.Contains(n.Value, "\n") {
n.Style = LiteralStyle
}
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from mgo/bson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
// InlineUnmarshalers holds indexes to inlined fields that
// contain unmarshaler values.
InlineUnmarshalers [][]int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Id holds the unique field identifier, so we can cheaply
// check for field duplicates without maintaining an extra map.
Id int
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
var unmarshalerType reflect.Type
func init() {
var v Unmarshaler
unmarshalerType = reflect.ValueOf(&v).Elem().Type()
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
inlineUnmarshalers := [][]int(nil)
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct, reflect.Ptr:
ftype := field.Type
for ftype.Kind() == reflect.Ptr {
ftype = ftype.Elem()
}
if ftype.Kind() != reflect.Struct {
return nil, errors.New("option ,inline may only be used on a struct or map field")
}
if reflect.PtrTo(ftype).Implements(unmarshalerType) {
inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
} else {
sinfo, err := getStructInfo(ftype)
if err != nil {
return nil, err
}
for _, index := range sinfo.InlineUnmarshalers {
inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
finfo.Id = len(fieldsList)
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
}
default:
return nil, errors.New("option ,inline may only be used on a struct or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
info.Id = len(fieldsList)
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
FieldsMap: fieldsMap,
FieldsList: fieldsList,
InlineMap: inlineMap,
InlineUnmarshalers: inlineUnmarshalers,
}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
IsZero() bool
}
func isZero(v reflect.Value) bool {
kind := v.Kind()
if z, ok := v.Interface().(IsZeroer); ok {
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
return true
}
return z.IsZero()
}
switch kind {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
for i := v.NumField() - 1; i >= 0; i-- {
if vt.Field(i).PkgPath != "" {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
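One behavior defined above that is easy to miss: Decoder.KnownFields(true) turns unknown mapping keys into a *yaml.TypeError instead of silently dropping them, while known fields are still decoded. A sketch against the vendored gopkg.in/yaml.v3:

package main

import (
    "fmt"
    "strings"

    "gopkg.in/yaml.v3"
)

type Config struct {
    Name string `yaml:"name"`
}

func main() {
    dec := yaml.NewDecoder(strings.NewReader("name: demo\ntypo: 1\n"))
    dec.KnownFields(true)
    var c Config
    err := dec.Decode(&c)
    fmt.Println(c.Name) // demo
    fmt.Println(err)    // a *yaml.TypeError reporting the unknown "typo" key
}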

805
vendor/gopkg.in/yaml.v3/yamlh.go generated vendored Normal file
View File

@ -0,0 +1,805 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"fmt"
"io"
)
// The version directive data.
type yaml_version_directive_t struct {
major int8 // The major version number.
minor int8 // The minor version number.
}
// The tag directive data.
type yaml_tag_directive_t struct {
handle []byte // The tag handle.
prefix []byte // The tag prefix.
}
type yaml_encoding_t int
// The stream encoding.
const (
// Let the parser choose the encoding.
yaml_ANY_ENCODING yaml_encoding_t = iota
yaml_UTF8_ENCODING // The default UTF-8 encoding.
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)
type yaml_break_t int
// Line break types.
const (
// Let the parser choose the break type.
yaml_ANY_BREAK yaml_break_t = iota
yaml_CR_BREAK // Use CR for line breaks (Mac style).
yaml_LN_BREAK // Use LN for line breaks (Unix style).
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
type yaml_error_type_t int
// Many bad things could happen with the parser and emitter.
const (
// No error is produced.
yaml_NO_ERROR yaml_error_type_t = iota
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
yaml_READER_ERROR // Cannot read or decode the input stream.
yaml_SCANNER_ERROR // Cannot scan the input stream.
yaml_PARSER_ERROR // Cannot parse the input stream.
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
yaml_WRITER_ERROR // Cannot write to the output stream.
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
)
// The pointer position.
type yaml_mark_t struct {
index int // The position index.
line int // The position line.
column int // The position column.
}
// Node Styles
type yaml_style_t int8
type yaml_scalar_style_t yaml_style_t
// Scalar styles.
const (
// Let the emitter choose the style.
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
)
type yaml_sequence_style_t yaml_style_t
// Sequence styles.
const (
// Let the emitter choose the style.
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
)
type yaml_mapping_style_t yaml_style_t
// Mapping styles.
const (
// Let the emitter choose the style.
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
)
// Tokens
type yaml_token_type_t int
// Token types.
const (
// An empty token.
yaml_NO_TOKEN yaml_token_type_t = iota
yaml_STREAM_START_TOKEN // A STREAM-START token.
yaml_STREAM_END_TOKEN // A STREAM-END token.
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
yaml_KEY_TOKEN // A KEY token.
yaml_VALUE_TOKEN // A VALUE token.
yaml_ALIAS_TOKEN // An ALIAS token.
yaml_ANCHOR_TOKEN // An ANCHOR token.
yaml_TAG_TOKEN // A TAG token.
yaml_SCALAR_TOKEN // A SCALAR token.
)
func (tt yaml_token_type_t) String() string {
switch tt {
case yaml_NO_TOKEN:
return "yaml_NO_TOKEN"
case yaml_STREAM_START_TOKEN:
return "yaml_STREAM_START_TOKEN"
case yaml_STREAM_END_TOKEN:
return "yaml_STREAM_END_TOKEN"
case yaml_VERSION_DIRECTIVE_TOKEN:
return "yaml_VERSION_DIRECTIVE_TOKEN"
case yaml_TAG_DIRECTIVE_TOKEN:
return "yaml_TAG_DIRECTIVE_TOKEN"
case yaml_DOCUMENT_START_TOKEN:
return "yaml_DOCUMENT_START_TOKEN"
case yaml_DOCUMENT_END_TOKEN:
return "yaml_DOCUMENT_END_TOKEN"
case yaml_BLOCK_SEQUENCE_START_TOKEN:
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
case yaml_BLOCK_MAPPING_START_TOKEN:
return "yaml_BLOCK_MAPPING_START_TOKEN"
case yaml_BLOCK_END_TOKEN:
return "yaml_BLOCK_END_TOKEN"
case yaml_FLOW_SEQUENCE_START_TOKEN:
return "yaml_FLOW_SEQUENCE_START_TOKEN"
case yaml_FLOW_SEQUENCE_END_TOKEN:
return "yaml_FLOW_SEQUENCE_END_TOKEN"
case yaml_FLOW_MAPPING_START_TOKEN:
return "yaml_FLOW_MAPPING_START_TOKEN"
case yaml_FLOW_MAPPING_END_TOKEN:
return "yaml_FLOW_MAPPING_END_TOKEN"
case yaml_BLOCK_ENTRY_TOKEN:
return "yaml_BLOCK_ENTRY_TOKEN"
case yaml_FLOW_ENTRY_TOKEN:
return "yaml_FLOW_ENTRY_TOKEN"
case yaml_KEY_TOKEN:
return "yaml_KEY_TOKEN"
case yaml_VALUE_TOKEN:
return "yaml_VALUE_TOKEN"
case yaml_ALIAS_TOKEN:
return "yaml_ALIAS_TOKEN"
case yaml_ANCHOR_TOKEN:
return "yaml_ANCHOR_TOKEN"
case yaml_TAG_TOKEN:
return "yaml_TAG_TOKEN"
case yaml_SCALAR_TOKEN:
return "yaml_SCALAR_TOKEN"
}
return "<unknown token>"
}
// The token structure.
type yaml_token_t struct {
// The token type.
typ yaml_token_type_t
// The start/end of the token.
start_mark, end_mark yaml_mark_t
// The stream encoding (for yaml_STREAM_START_TOKEN).
encoding yaml_encoding_t
// The alias/anchor/scalar value or tag/tag directive handle
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
value []byte
// The tag suffix (for yaml_TAG_TOKEN).
suffix []byte
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
prefix []byte
// The scalar style (for yaml_SCALAR_TOKEN).
style yaml_scalar_style_t
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
major, minor int8
}
// Events
type yaml_event_type_t int8
// Event types.
const (
// An empty event.
yaml_NO_EVENT yaml_event_type_t = iota
yaml_STREAM_START_EVENT // A STREAM-START event.
yaml_STREAM_END_EVENT // A STREAM-END event.
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
yaml_ALIAS_EVENT // An ALIAS event.
yaml_SCALAR_EVENT // A SCALAR event.
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
yaml_MAPPING_START_EVENT // A MAPPING-START event.
yaml_MAPPING_END_EVENT // A MAPPING-END event.
yaml_TAIL_COMMENT_EVENT
)
var eventStrings = []string{
yaml_NO_EVENT: "none",
yaml_STREAM_START_EVENT: "stream start",
yaml_STREAM_END_EVENT: "stream end",
yaml_DOCUMENT_START_EVENT: "document start",
yaml_DOCUMENT_END_EVENT: "document end",
yaml_ALIAS_EVENT: "alias",
yaml_SCALAR_EVENT: "scalar",
yaml_SEQUENCE_START_EVENT: "sequence start",
yaml_SEQUENCE_END_EVENT: "sequence end",
yaml_MAPPING_START_EVENT: "mapping start",
yaml_MAPPING_END_EVENT: "mapping end",
yaml_TAIL_COMMENT_EVENT: "tail comment",
}
func (e yaml_event_type_t) String() string {
if e < 0 || int(e) >= len(eventStrings) {
return fmt.Sprintf("unknown event %d", e)
}
return eventStrings[e]
}
// The event structure.
type yaml_event_t struct {
// The event type.
typ yaml_event_type_t
// The start and end of the event.
start_mark, end_mark yaml_mark_t
// The document encoding (for yaml_STREAM_START_EVENT).
encoding yaml_encoding_t
// The version directive (for yaml_DOCUMENT_START_EVENT).
version_directive *yaml_version_directive_t
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
tag_directives []yaml_tag_directive_t
// The comments
head_comment []byte
line_comment []byte
foot_comment []byte
tail_comment []byte
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
anchor []byte
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
tag []byte
// The scalar value (for yaml_SCALAR_EVENT).
value []byte
// Is the document start/end indicator implicit, or the tag optional?
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
implicit bool
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
quoted_implicit bool
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
style yaml_style_t
}
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
// Nodes
const (
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
)
type yaml_node_type_t int
// Node types.
const (
// An empty node.
yaml_NO_NODE yaml_node_type_t = iota
yaml_SCALAR_NODE // A scalar node.
yaml_SEQUENCE_NODE // A sequence node.
yaml_MAPPING_NODE // A mapping node.
)
// An element of a sequence node.
type yaml_node_item_t int
// An element of a mapping node.
type yaml_node_pair_t struct {
key int // The key of the element.
value int // The value of the element.
}
// The node structure.
type yaml_node_t struct {
typ yaml_node_type_t // The node type.
tag []byte // The node tag.
// The node data.
// The scalar parameters (for yaml_SCALAR_NODE).
scalar struct {
value []byte // The scalar value.
length int // The length of the scalar value.
style yaml_scalar_style_t // The scalar style.
}
// The sequence parameters (for YAML_SEQUENCE_NODE).
sequence struct {
items_data []yaml_node_item_t // The stack of sequence items.
style yaml_sequence_style_t // The sequence style.
}
// The mapping parameters (for yaml_MAPPING_NODE).
mapping struct {
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
pairs_start *yaml_node_pair_t // The beginning of the stack.
pairs_end *yaml_node_pair_t // The end of the stack.
pairs_top *yaml_node_pair_t // The top of the stack.
style yaml_mapping_style_t // The mapping style.
}
start_mark yaml_mark_t // The beginning of the node.
end_mark yaml_mark_t // The end of the node.
}
// The document structure.
type yaml_document_t struct {
// The document nodes.
nodes []yaml_node_t
// The version directive.
version_directive *yaml_version_directive_t
// The list of tag directives.
tag_directives_data []yaml_tag_directive_t
tag_directives_start int // The beginning of the tag directives list.
tag_directives_end int // The end of the tag directives list.
start_implicit int // Is the document start indicator implicit?
end_implicit int // Is the document end indicator implicit?
// The start/end of the document.
start_mark, end_mark yaml_mark_t
}
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
// yaml_parser_set_input().
// [out] buffer The buffer to write the data from the source.
// [in] size The size of the buffer.
// [out] size_read The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
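// For illustration only, not part of the vendored file: a read handler
// satisfying the contract documented above can simply drain the io.Reader
// already stored on the parser, writing at most len(buffer) bytes and
// reporting io.EOF through err.
func example_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_reader.Read(buffer)
}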
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
possible bool // Is a simple key possible?
required bool // Is a simple key required?
token_number int // The number of the token.
mark yaml_mark_t // The position mark.
}
// The states of the parser.
type yaml_parser_state_t int
const (
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
yaml_PARSE_END_STATE // Expect nothing.
)
func (ps yaml_parser_state_t) String() string {
switch ps {
case yaml_PARSE_STREAM_START_STATE:
return "yaml_PARSE_STREAM_START_STATE"
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_START_STATE:
return "yaml_PARSE_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
case yaml_PARSE_DOCUMENT_END_STATE:
return "yaml_PARSE_DOCUMENT_END_STATE"
case yaml_PARSE_BLOCK_NODE_STATE:
return "yaml_PARSE_BLOCK_NODE_STATE"
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
case yaml_PARSE_FLOW_NODE_STATE:
return "yaml_PARSE_FLOW_NODE_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
case yaml_PARSE_END_STATE:
return "yaml_PARSE_END_STATE"
}
return "<unknown parser state>"
}
// This structure holds aliases data.
type yaml_alias_data_t struct {
anchor []byte // The anchor.
index int // The node id.
mark yaml_mark_t // The anchor mark.
}
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
// The error context.
context string
context_mark yaml_mark_t
// Reader stuff
read_handler yaml_read_handler_t // Read handler.
input_reader io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
unread int // The number of unread characters in the buffer.
newlines int // The number of line breaks since last non-break/non-blank character
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The input encoding.
offset int // The offset of the current position (in bytes).
mark yaml_mark_t // The mark of the current position.
// Comments
head_comment []byte // The current head comments
line_comment []byte // The current line comments
foot_comment []byte // The current foot comments
tail_comment []byte // Foot comment that happens at the end of a block.
stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
comments []yaml_comment_t // The folded comments for all parsed tokens
comments_head int
// Scanner stuff
stream_start_produced bool // Have we started to scan the input stream?
stream_end_produced bool // Have we reached the end of the input stream?
flow_level int // The number of unclosed '[' and '{' indicators.
tokens []yaml_token_t // The tokens queue.
tokens_head int // The head of the tokens queue.
tokens_parsed int // The number of tokens fetched from the queue.
token_available bool // Does the tokens queue contain a token ready for dequeueing.
indent int // The current indentation level.
indents []int // The indentation levels stack.
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
// Parser stuff
state yaml_parser_state_t // The current parser state.
states []yaml_parser_state_t // The parser states stack.
marks []yaml_mark_t // The stack of marks.
tag_directives []yaml_tag_directive_t // The list of TAG directives.
// Dumper stuff
aliases []yaml_alias_data_t // The alias data.
document *yaml_document_t // The currently parsed document.
}
type yaml_comment_t struct {
scan_mark yaml_mark_t // Position where scanning for comments started
token_mark yaml_mark_t // Position after which tokens will be associated with this comment
start_mark yaml_mark_t // Position of '#' comment mark
end_mark yaml_mark_t // Position where comment terminated
head []byte
line []byte
foot []byte
}
// Emitter Definitions
// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out] data A pointer to an application data specified by
// yaml_emitter_set_output().
// @param[in] buffer The buffer with bytes to be written.
// @param[in] size The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
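// For illustration only, not part of the vendored file: a write handler
// satisfying the contract above can forward the buffered bytes to the
// io.Writer already stored on the emitter.
func example_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_writer.Write(buffer)
	return err
}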
type yaml_emitter_state_t int
// The emitter states.
const (
// Expect STREAM-START.
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
yaml_EMIT_END_STATE // Expect nothing.
)
// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// Writer stuff
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_writer io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The stream encoding.
// Emitter stuff
canonical bool // If the output is in the canonical style?
best_indent int // The number of indentation spaces.
best_width int // The preferred width of the output lines.
unicode bool // Allow unescaped non-ASCII characters?
line_break yaml_break_t // The preferred line break.
state yaml_emitter_state_t // The current emitter state.
states []yaml_emitter_state_t // The stack of states.
events []yaml_event_t // The event queue.
events_head int // The head of the event queue.
indents []int // The stack of indentation levels.
tag_directives []yaml_tag_directive_t // The list of tag directives.
indent int // The current indentation level.
flow_level int // The current flow level.
root_context bool // Is it the document root context?
sequence_context bool // Is it a sequence context?
mapping_context bool // Is it a mapping context?
simple_key_context bool // Is it a simple mapping key context?
line int // The current line.
column int // The current column.
whitespace bool // If the last character was a whitespace?
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
open_ended bool // If an explicit document end is required?
space_above bool // Is there an empty line above?
foot_indent int // The indent used to write the foot comment above, or -1 if none.
// Anchor analysis.
anchor_data struct {
anchor []byte // The anchor value.
alias bool // Is it an alias?
}
// Tag analysis.
tag_data struct {
handle []byte // The tag handle.
suffix []byte // The tag suffix.
}
// Scalar analysis.
scalar_data struct {
value []byte // The scalar value.
multiline bool // Does the scalar contain line breaks?
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
style yaml_scalar_style_t // The output style.
}
// Comments
head_comment []byte
line_comment []byte
foot_comment []byte
tail_comment []byte
// Dumper stuff
opened bool // If the stream was already opened?
closed bool // If the stream was already closed?
// The information associated with the document nodes.
anchors *struct {
references int // The number of references.
anchor int // The anchor id.
serialized bool // If the node has been emitted?
}
last_anchor_id int // The last assigned anchor id.
document *yaml_document_t // The currently emitted document.
}
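The comment fields threaded through the parser and emitter structures above are what give yaml.v3 its comment support; a small hedged sketch of how that surfaces in the public API as Node.HeadComment/LineComment/FootComment (illustrative input, not from this repository):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("# build settings\nreplicas: 3 # keep small\n")
	var n yaml.Node
	if err := yaml.Unmarshal(src, &n); err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the comments survive the round trip via the node comment fields
}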

198
vendor/gopkg.in/yaml.v3/yamlprivateh.go generated vendored Normal file
View File

@ -0,0 +1,198 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
const (
// The size of the input raw buffer.
input_raw_buffer_size = 512
// The size of the input buffer.
// It should be possible to decode the whole raw buffer.
input_buffer_size = input_raw_buffer_size * 3
// The size of the output buffer.
output_buffer_size = 128
// The size of the output raw buffer.
// It should be possible to encode the whole output buffer.
output_raw_buffer_size = (output_buffer_size*2 + 2)
// The size of other stacks and queues.
initial_stack_size = 16
initial_queue_size = 16
initial_string_size = 16
)
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}
// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9'
}
// Get the value of a digit.
func as_digit(b []byte, i int) int {
return int(b[i]) - '0'
}
// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}
// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
bi := b[i]
if bi >= 'A' && bi <= 'F' {
return int(bi) - 'A' + 10
}
if bi >= 'a' && bi <= 'f' {
return int(bi) - 'a' + 10
}
return int(bi) - '0'
}
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
return b[i] <= 0x7F
}
// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
return ((b[i] == 0x0A) || // . == #x0A
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
return b[i] == 0x00
}
// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
return b[i] == ' '
}
// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
return b[i] == '\t'
}
// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
//return is_space(b, i) || is_tab(b, i)
return b[i] == ' ' || b[i] == '\t'
}
// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
return (b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}
func is_crlf(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
//return is_break(b, i) || is_z(b, i)
return (
// is_break:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
// is_z:
b[i] == 0)
}
// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
//return is_space(b, i) || is_breakz(b, i)
return (
// is_space:
b[i] == ' ' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
//return is_blank(b, i) || is_breakz(b, i)
return (
// is_blank:
b[i] == ' ' || b[i] == '\t' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Determine the width of the character.
func width(b byte) int {
// Don't replace these by a switch without first
// confirming that it is being inlined.
if b&0x80 == 0x00 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
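The width helper above is a straight lead-byte classification of UTF-8 sequences; a standalone sketch (hypothetical names, not part of the package) cross-checking that rule against the standard library:

package main

import (
	"fmt"
	"unicode/utf8"
)

func leadByteWidth(b byte) int { // same rule as width() above
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	for _, r := range []rune{'a', 'é', '√', '🙂'} {
		buf := make([]byte, 4)
		n := utf8.EncodeRune(buf, r)
		fmt.Printf("%q: lead-byte width %d, encoded length %d\n", r, leadByteWidth(buf[0]), n)
	}
}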

File diff suppressed because it is too large

View File

@ -17,7 +17,7 @@ limitations under the License.
// This file was autogenerated by go-to-protobuf. Do not edit it manually! // This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2'; syntax = "proto2";
package k8s.io.api.core.v1; package k8s.io.api.core.v1;
@ -718,7 +718,6 @@ message Container {
// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
// when it might take a long time to load data or warm a cache, than during steady-state operation. // when it might take a long time to load data or warm a cache, than during steady-state operation.
// This cannot be updated. // This cannot be updated.
// This is a beta feature enabled by the StartupProbe feature flag.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional // +optional
optional Probe startupProbe = 22; optional Probe startupProbe = 22;
@ -817,6 +816,7 @@ message ContainerPort {
// Protocol for port. Must be UDP, TCP, or SCTP. // Protocol for port. Must be UDP, TCP, or SCTP.
// Defaults to "TCP". // Defaults to "TCP".
// +optional // +optional
// +default="TCP"
optional string protocol = 4; optional string protocol = 4;
// What host IP to bind the external port to. // What host IP to bind the external port to.
@ -1404,7 +1404,12 @@ message EphemeralVolumeSource {
optional bool readOnly = 2; optional bool readOnly = 2;
} }
// Event is a report of an event somewhere in the cluster. // Event is a report of an event somewhere in the cluster. Events
// have a limited retention time and triggers and messages may evolve
// with time. Event consumers should not rely on the timing of an event
// with a given Reason reflecting a consistent underlying trigger, or the
// continued existence of events with that Reason. Events should be
// treated as informative, best-effort, supplemental data.
message Event { message Event {
// Standard object's metadata. // Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
@ -2032,6 +2037,12 @@ message LoadBalancerIngress {
// (typically AWS load-balancers) // (typically AWS load-balancers)
// +optional // +optional
optional string hostname = 2; optional string hostname = 2;
// Ports is a list of records of service ports
// If used, every port defined in the service should have an entry in it
// +listType=atomic
// +optional
repeated PortStatus ports = 4;
} }
// LoadBalancerStatus represents the status of a load-balancer. // LoadBalancerStatus represents the status of a load-balancer.
@ -2677,17 +2688,13 @@ message PersistentVolumeClaimSpec {
optional string volumeMode = 6; optional string volumeMode = 6;
// This field can be used to specify either: // This field can be used to specify either:
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
// * An existing PVC (PersistentVolumeClaim) // * An existing PVC (PersistentVolumeClaim)
// * An existing custom resource/object that implements data population (Alpha) // * An existing custom resource that implements data population (Alpha)
// In order to use VolumeSnapshot object types, the appropriate feature gate // In order to use custom resource types that implement data population,
// must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) // the AnyVolumeDataSource feature gate must be enabled.
// If the provisioner or an external controller can support the specified data source, // If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source. // it will create a new volume based on the contents of the specified data source.
// If the specified data source is not supported, the volume will
// not be created and the failure will be reported as an event.
// In the future, we plan to support more data source types and the behavior
// of the provisioner may change.
// +optional // +optional
optional TypedLocalObjectReference dataSource = 7; optional TypedLocalObjectReference dataSource = 7;
} }
@ -3340,7 +3347,7 @@ message PodSecurityContext {
// volume types which support fsGroup based ownership(and permissions). // volume types which support fsGroup based ownership(and permissions).
// It will have no effect on ephemeral volume types such as: secret, configmaps // It will have no effect on ephemeral volume types such as: secret, configmaps
// and emptydir. // and emptydir.
// Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
// +optional // +optional
optional string fsGroupChangePolicy = 9; optional string fsGroupChangePolicy = 9;
@ -3765,6 +3772,29 @@ message PodTemplateSpec {
optional PodSpec spec = 2; optional PodSpec spec = 2;
} }
message PortStatus {
// Port is the port number of the service port of which status is recorded here
optional int32 port = 1;
// Protocol is the protocol of the service port of which status is recorded here
// The supported values are: "TCP", "UDP", "SCTP"
optional string protocol = 2;
// Error is to record the problem with the service port
// The format of the error shall comply with the following rules:
// - built-in error values shall be specified in this file and those shall use
// CamelCase names
// - cloud provider specific error values must have names that comply with the
// format foo.example.com/CamelCase.
// ---
// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
// +optional
// +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
// +kubebuilder:validation:MaxLength=316
optional string error = 3;
}
// PortworxVolumeSource represents a Portworx volume resource. // PortworxVolumeSource represents a Portworx volume resource.
message PortworxVolumeSource { message PortworxVolumeSource {
// VolumeID uniquely identifies a Portworx volume // VolumeID uniquely identifies a Portworx volume
@ -3853,6 +3883,7 @@ message Probe {
// Represents a projected volume source // Represents a projected volume source
message ProjectedVolumeSource { message ProjectedVolumeSource {
// list of volume projections // list of volume projections
// +optional
repeated VolumeProjection sources = 1; repeated VolumeProjection sources = 1;
// Mode bits used to set permissions on created files by default. // Mode bits used to set permissions on created files by default.
@ -4750,10 +4781,14 @@ message ServicePort {
// +optional // +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4;
// The port on each node on which this service is exposed when type=NodePort or LoadBalancer. // The port on each node on which this service is exposed when type is
// Usually assigned by the system. If specified, it will be allocated to the service // NodePort or LoadBalancer. Usually assigned by the system. If a value is
// if unused or else creation of the service will fail. // specified, in-range, and not in use it will be used, otherwise the
// Default is to auto-allocate a port if the ServiceType of this Service requires one. // operation will fail. If not specified, a port will be allocated if this
// Service requires one. If this field is specified when creating a
// Service which does not need it, creation will fail. This field will be
// wiped when updating a Service to no longer need it (e.g. changing type
// from NodePort to ClusterIP).
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
// +optional // +optional
optional int32 nodePort = 5; optional int32 nodePort = 5;
@ -4791,30 +4826,68 @@ message ServiceSpec {
map<string, string> selector = 2; map<string, string> selector = 2;
// clusterIP is the IP address of the service and is usually assigned // clusterIP is the IP address of the service and is usually assigned
// randomly by the master. If an address is specified manually and is not in // randomly. If an address is specified manually, is in-range (as per
// use by others, it will be allocated to the service; otherwise, creation // system configuration), and is not in use, it will be allocated to the
// of the service will fail. This field can not be changed through updates. // service; otherwise creation of the service will fail. This field may not
// Valid values are "None", empty string (""), or a valid IP address. "None" // be changed through updates unless the type field is also being changed
// can be specified for headless services when proxying is not required. // to ExternalName (which requires this field to be blank) or the type
// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if // field is being changed from ExternalName (in which case this field may
// type is ExternalName. // optionally be specified, as describe above). Valid values are "None",
// empty string (""), or a valid IP address. Setting this to "None" makes a
// "headless service" (no virtual IP), which is useful when direct endpoint
// connections are preferred and proxying is not required. Only applies to
// types ClusterIP, NodePort, and LoadBalancer. If this field is specified
// when creating a Service of type ExternalName, creation will fail. This
// field will be wiped when updating a Service to type ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +optional // +optional
optional string clusterIP = 3; optional string clusterIP = 3;
// ClusterIPs is a list of IP addresses assigned to this service, and are
// usually assigned randomly. If an address is specified manually, is
// in-range (as per system configuration), and is not in use, it will be
// allocated to the service; otherwise creation of the service will fail.
// This field may not be changed through updates unless the type field is
// also being changed to ExternalName (which requires this field to be
// empty) or the type field is being changed from ExternalName (in which
// case this field may optionally be specified, as describe above). Valid
// values are "None", empty string (""), or a valid IP address. Setting
// this to "None" makes a "headless service" (no virtual IP), which is
// useful when direct endpoint connections are preferred and proxying is
// not required. Only applies to types ClusterIP, NodePort, and
// LoadBalancer. If this field is specified when creating a Service of type
// ExternalName, creation will fail. This field will be wiped when updating
// a Service to type ExternalName. If this field is not specified, it will
// be initialized from the clusterIP field. If this field is specified,
// clients must ensure that clusterIPs[0] and clusterIP have the same
// value.
//
// Unless the "IPv6DualStack" feature gate is enabled, this field is
// limited to one value, which must be the same as the clusterIP field. If
// the feature gate is enabled, this field may hold a maximum of two
// entries (dual-stack IPs, in either order). These IPs must correspond to
// the values of the ipFamilies field. Both clusterIPs and ipFamilies are
// governed by the ipFamilyPolicy field.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +listType=atomic
// +optional
repeated string clusterIPs = 18;
// type determines how the Service is exposed. Defaults to ClusterIP. Valid // type determines how the Service is exposed. Defaults to ClusterIP. Valid
// options are ExternalName, ClusterIP, NodePort, and LoadBalancer. // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
// "ExternalName" maps to the specified externalName. // "ClusterIP" allocates a cluster-internal IP address for load-balancing
// "ClusterIP" allocates a cluster-internal IP address for load-balancing to // to endpoints. Endpoints are determined by the selector or if that is not
// endpoints. Endpoints are determined by the selector or if that is not // specified, by manual construction of an Endpoints object or
// specified, by manual construction of an Endpoints object. If clusterIP is // EndpointSlice objects. If clusterIP is "None", no virtual IP is
// "None", no virtual IP is allocated and the endpoints are published as a // allocated and the endpoints are published as a set of endpoints rather
// set of endpoints rather than a stable IP. // than a virtual IP.
// "NodePort" builds on ClusterIP and allocates a port on every node which // "NodePort" builds on ClusterIP and allocates a port on every node which
// routes to the clusterIP. // routes to the same endpoints as the clusterIP.
// "LoadBalancer" builds on NodePort and creates an // "LoadBalancer" builds on NodePort and creates an external load-balancer
// external load-balancer (if supported in the current cloud) which routes // (if supported in the current cloud) which routes to the same endpoints
// to the clusterIP. // as the clusterIP.
// "ExternalName" aliases this service to the specified externalName.
// Several other fields do not apply to ExternalName services.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
// +optional // +optional
optional string type = 4; optional string type = 4;
@ -4850,10 +4923,10 @@ message ServiceSpec {
// +optional // +optional
repeated string loadBalancerSourceRanges = 9; repeated string loadBalancerSourceRanges = 9;
// externalName is the external reference that kubedns or equivalent will // externalName is the external reference that discovery mechanisms will
// return as a CNAME record for this service. No proxying will be involved. // return as an alias for this service (e.g. a DNS CNAME record). No
// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) // proxying will be involved. Must be a lowercase RFC-1123 hostname
// and requires Type to be ExternalName. // (https://tools.ietf.org/html/rfc1123) and requires Type to be
// +optional // +optional
optional string externalName = 10; optional string externalName = 10;
@ -4867,10 +4940,14 @@ message ServiceSpec {
optional string externalTrafficPolicy = 11; optional string externalTrafficPolicy = 11;
// healthCheckNodePort specifies the healthcheck nodePort for the service. // healthCheckNodePort specifies the healthcheck nodePort for the service.
// If not specified, HealthCheckNodePort is created by the service api // This only applies when type is set to LoadBalancer and
// backend with the allocated nodePort. Will use user-specified nodePort value // externalTrafficPolicy is set to Local. If a value is specified, is
// if specified by the client. Only effects when Type is set to LoadBalancer // in-range, and is not in use, it will be used. If not specified, a value
// and ExternalTrafficPolicy is set to Local. // will be automatically allocated. External systems (e.g. load-balancers)
// can use this port to determine if a given node holds endpoints for this
// service or not. If this field is specified when creating a Service
// which does not need it, creation will fail. This field will be wiped
// when updating a Service to no longer need it (e.g. changing type).
// +optional // +optional
optional int32 healthCheckNodePort = 12; optional int32 healthCheckNodePort = 12;
@ -4889,24 +4966,6 @@ message ServiceSpec {
// +optional // +optional
optional SessionAffinityConfig sessionAffinityConfig = 14; optional SessionAffinityConfig sessionAffinityConfig = 14;
// ipFamily specifies whether this Service has a preference for a particular IP family (e.g.
// IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster,
// you can specify ipFamily when creating a ClusterIP Service to determine whether the
// controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when
// creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In
// either case, if you do not specify an ipFamily explicitly, it will default to the
// cluster's primary IP family.
// This field is part of an alpha feature, and you should not make any assumptions about its
// semantics other than those described above. In particular, you should not assume that it
// can (or cannot) be changed after creation time; that it can only have the values "IPv4"
// and "IPv6"; or that its current value on a given Service correctly reflects the current
// state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service
// is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in
// the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an
// irrelevant value anyway.)
// +optional
optional string ipFamily = 15;
// topologyKeys is a preference-order list of topology keys which // topologyKeys is a preference-order list of topology keys which
// implementations of services should use to preferentially sort endpoints // implementations of services should use to preferentially sort endpoints
// when accessing this Service, it can not be used at the same time as // when accessing this Service, it can not be used at the same time as
@ -4919,8 +4978,51 @@ message ServiceSpec {
// The special value "*" may be used to mean "any topology". This catch-all // The special value "*" may be used to mean "any topology". This catch-all
// value, if used, only makes sense as the last value in the list. // value, if used, only makes sense as the last value in the list.
// If this is not specified or empty, no topology constraints will be applied. // If this is not specified or empty, no topology constraints will be applied.
// This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.
// +optional // +optional
repeated string topologyKeys = 16; repeated string topologyKeys = 16;
// IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
// service, and is gated by the "IPv6DualStack" feature gate. This field
// is usually assigned automatically based on cluster configuration and the
// ipFamilyPolicy field. If this field is specified manually, the requested
// family is available in the cluster, and ipFamilyPolicy allows it, it
// will be used; otherwise creation of the service will fail. This field
// is conditionally mutable: it allows for adding or removing a secondary
// IP family, but it does not allow changing the primary IP family of the
// Service. Valid values are "IPv4" and "IPv6". This field only applies
// to Services of types ClusterIP, NodePort, and LoadBalancer, and does
// apply to "headless" services. This field will be wiped when updating a
// Service to type ExternalName.
//
// This field may hold a maximum of two entries (dual-stack families, in
// either order). These families must correspond to the values of the
// clusterIPs field, if specified. Both clusterIPs and ipFamilies are
// governed by the ipFamilyPolicy field.
// +listType=atomic
// +optional
repeated string ipFamilies = 19;
// IPFamilyPolicy represents the dual-stack-ness requested or required by
// this Service, and is gated by the "IPv6DualStack" feature gate. If
// there is no value provided, then this field will be set to SingleStack.
// Services can be "SingleStack" (a single IP family), "PreferDualStack"
// (two IP families on dual-stack configured clusters or a single IP family
// on single-stack clusters), or "RequireDualStack" (two IP families on
// dual-stack configured clusters, otherwise fail). The ipFamilies and
// clusterIPs fields depend on the value of this field. This field will be
// wiped when updating a service to type ExternalName.
// +optional
optional string ipFamilyPolicy = 17;
// allocateLoadBalancerNodePorts defines if NodePorts will be automatically
// allocated for services with type LoadBalancer. Default is "true". It may be
// set to "false" if the cluster load-balancer does not rely on NodePorts.
// allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer
// and will be cleared if the type is changed to any other type.
// This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.
// +optional
optional bool allocateLoadBalancerNodePorts = 20;
} }
// ServiceStatus represents the current status of a service. // ServiceStatus represents the current status of a service.
@ -4929,6 +5031,14 @@ message ServiceStatus {
// if one is present. // if one is present.
// +optional // +optional
optional LoadBalancerStatus loadBalancer = 1; optional LoadBalancerStatus loadBalancer = 1;
// Current service state
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2;
} }
// SessionAffinityConfig represents the configurations of session affinity. // SessionAffinityConfig represents the configurations of session affinity.
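The new ServiceSpec fields above (clusterIPs, ipFamilies, ipFamilyPolicy, allocateLoadBalancerNodePorts) are what the bump to k8s.io/api v0.20.1 exposes to Go callers; a hedged sketch of a dual-stack Service built with the vendored types (names and selectors are placeholders, and clusterIPs is left for the server to allocate):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	policy := corev1.IPFamilyPolicyPreferDualStack
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: corev1.ServiceSpec{
			Type:           corev1.ServiceTypeClusterIP,
			Selector:       map[string]string{"app": "demo"},
			IPFamilies:     []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
			IPFamilyPolicy: &policy, // PreferDualStack; the ipFamilies order picks the primary family
			Ports:          []corev1.ServicePort{{Name: "http", Port: 80, Protocol: corev1.ProtocolTCP}},
		},
	}
	fmt.Println(svc.Name, svc.Spec.IPFamilies)
}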

View File

@ -21,44 +21,39 @@ import (
) )
// Returns string version of ResourceName. // Returns string version of ResourceName.
func (self ResourceName) String() string { func (rn ResourceName) String() string {
return string(self) return string(rn)
} }
// Returns the CPU limit if specified. // Cpu returns the Cpu limit if specified.
func (self *ResourceList) Cpu() *resource.Quantity { func (rl *ResourceList) Cpu() *resource.Quantity {
if val, ok := (*self)[ResourceCPU]; ok { return rl.Name(ResourceCPU, resource.DecimalSI)
return &val
}
return &resource.Quantity{Format: resource.DecimalSI}
} }
// Returns the Memory limit if specified. // Memory returns the Memory limit if specified.
func (self *ResourceList) Memory() *resource.Quantity { func (rl *ResourceList) Memory() *resource.Quantity {
if val, ok := (*self)[ResourceMemory]; ok { return rl.Name(ResourceMemory, resource.BinarySI)
return &val
}
return &resource.Quantity{Format: resource.BinarySI}
} }
// Returns the Storage limit if specified. // Storage returns the Storage limit if specified.
func (self *ResourceList) Storage() *resource.Quantity { func (rl *ResourceList) Storage() *resource.Quantity {
if val, ok := (*self)[ResourceStorage]; ok { return rl.Name(ResourceStorage, resource.BinarySI)
return &val
}
return &resource.Quantity{Format: resource.BinarySI}
} }
func (self *ResourceList) Pods() *resource.Quantity { // Pods returns the list of pods
if val, ok := (*self)[ResourcePods]; ok { func (rl *ResourceList) Pods() *resource.Quantity {
return &val return rl.Name(ResourcePods, resource.DecimalSI)
}
return &resource.Quantity{}
} }
func (self *ResourceList) StorageEphemeral() *resource.Quantity { // StorageEphemeral returns the list of ephemeral storage volumes, if any
if val, ok := (*self)[ResourceEphemeralStorage]; ok { func (rl *ResourceList) StorageEphemeral() *resource.Quantity {
return rl.Name(ResourceEphemeralStorage, resource.BinarySI)
}
// Name returns the resource with name if specified, otherwise it returns a nil quantity with default format.
func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity {
if val, ok := (*rl)[name]; ok {
return &val return &val
} }
return &resource.Quantity{} return &resource.Quantity{Format: defaultFormat}
} }
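The refactor above routes all of the typed accessors through the new ResourceList.Name helper; a short usage sketch with the public API (the quantities are placeholders):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limits := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("128Mi"),
	}
	// The accessors never return nil; a missing entry comes back as a zero
	// Quantity in the default format picked by Name().
	fmt.Println(limits.Cpu().String())     // 500m
	fmt.Println(limits.Memory().String())  // 128Mi
	fmt.Println(limits.Storage().String()) // 0
}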

267
vendor/k8s.io/api/core/v1/types.go generated vendored
View File

@ -489,17 +489,13 @@ type PersistentVolumeClaimSpec struct {
// +optional // +optional
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
// This field can be used to specify either: // This field can be used to specify either:
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
// * An existing PVC (PersistentVolumeClaim) // * An existing PVC (PersistentVolumeClaim)
// * An existing custom resource/object that implements data population (Alpha) // * An existing custom resource that implements data population (Alpha)
// In order to use VolumeSnapshot object types, the appropriate feature gate // In order to use custom resource types that implement data population,
// must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) // the AnyVolumeDataSource feature gate must be enabled.
// If the provisioner or an external controller can support the specified data source, // If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source. // it will create a new volume based on the contents of the specified data source.
// If the specified data source is not supported, the volume will
// not be created and the failure will be reported as an event.
// In the future, we plan to support more data source types and the behavior
// of the provisioner may change.
// +optional // +optional
DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"`
} }
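A hedged illustration of the dataSource field described above: a PersistentVolumeClaimSpec cloned from a VolumeSnapshot. The snapshot name "db-snap" and the 10Gi request are hypothetical, and the provisioner (plus, for custom resources, the AnyVolumeDataSource gate) must support the source.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	snapshotAPIGroup := "snapshot.storage.k8s.io" // API group of the VolumeSnapshot CRD

	// Clone a (hypothetical) VolumeSnapshot named "db-snap" into a new claim.
	_ = corev1.PersistentVolumeClaimSpec{
		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			},
		},
		DataSource: &corev1.TypedLocalObjectReference{
			APIGroup: &snapshotAPIGroup,
			Kind:     "VolumeSnapshot",
			Name:     "db-snap",
		},
	}
}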
@ -1615,6 +1611,7 @@ type ServiceAccountTokenProjection struct {
// Represents a projected volume source // Represents a projected volume source
type ProjectedVolumeSource struct { type ProjectedVolumeSource struct {
// list of volume projections // list of volume projections
// +optional
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
// Mode bits used to set permissions on created files by default. // Mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
@ -1840,6 +1837,7 @@ type ContainerPort struct {
// Protocol for port. Must be UDP, TCP, or SCTP. // Protocol for port. Must be UDP, TCP, or SCTP.
// Defaults to "TCP". // Defaults to "TCP".
// +optional // +optional
// +default="TCP"
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
// What host IP to bind the external port to. // What host IP to bind the external port to.
// +optional // +optional
@ -2287,7 +2285,6 @@ type Container struct {
// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
// when it might take a long time to load data or warm a cache, than during steady-state operation. // when it might take a long time to load data or warm a cache, than during steady-state operation.
// This cannot be updated. // This cannot be updated.
// This is a beta feature enabled by the StartupProbe feature flag.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional // +optional
StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
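A sketch of the startup-probe behavior described above, written against the v0.20 API where the probe handler field is still named Handler; the image name, path, and port are placeholders.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Give a slow-starting container up to 30*10s to come up; liveness only
	// takes over once the startup probe has succeeded.
	_ = corev1.Container{
		Name:  "app",
		Image: "example.com/app:latest", // hypothetical image
		StartupProbe: &corev1.Probe{
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
			},
			FailureThreshold: 30,
			PeriodSeconds:    10,
		},
		LivenessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
			},
			PeriodSeconds: 10,
		},
	}
}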
@ -3298,7 +3295,7 @@ type PodSecurityContext struct {
// volume types which support fsGroup based ownership(and permissions). // volume types which support fsGroup based ownership(and permissions).
// It will have no effect on ephemeral volume types such as: secret, configmaps // It will have no effect on ephemeral volume types such as: secret, configmaps
// and emptydir. // and emptydir.
// Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
// +optional // +optional
FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"`
// The seccomp options to use by the containers in this pod. // The seccomp options to use by the containers in this pod.
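To illustrate fsGroupChangePolicy as documented above (a sketch; the GID 2000 is arbitrary):

package main

import corev1 "k8s.io/api/core/v1"

func main() {
	fsGroup := int64(2000)
	policy := corev1.FSGroupChangePolicyOnRootMismatch

	// Only re-chown the volume when its root does not already match fsGroup;
	// "Always" remains the behavior when the field is left nil.
	_ = corev1.PodSecurityContext{
		FSGroup:             &fsGroup,
		FSGroupChangePolicy: &policy,
	}
}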
@ -3943,12 +3940,26 @@ const (
ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
) )
// These are the valid conditions of a service.
const (
// LoadBalancerPortsError represents the condition of the requested ports
// on the cloud load balancer instance.
LoadBalancerPortsError = "LoadBalancerPortsError"
)
// ServiceStatus represents the current status of a service. // ServiceStatus represents the current status of a service.
type ServiceStatus struct { type ServiceStatus struct {
// LoadBalancer contains the current status of the load-balancer, // LoadBalancer contains the current status of the load-balancer,
// if one is present. // if one is present.
// +optional // +optional
LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
// Current service state
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
} }
// LoadBalancerStatus represents the status of a load-balancer. // LoadBalancerStatus represents the status of a load-balancer.
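A small sketch of consuming the new ServiceStatus.Conditions field; the hasCondition helper and the "UnsupportedProtocol" reason are illustrative, not part of the API.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hasCondition reports whether the service status carries a condition of the
// given type with status True.
func hasCondition(st corev1.ServiceStatus, condType string) bool {
	for _, c := range st.Conditions {
		if c.Type == condType && c.Status == metav1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	st := corev1.ServiceStatus{
		Conditions: []metav1.Condition{{
			Type:   corev1.LoadBalancerPortsError,
			Status: metav1.ConditionTrue,
			Reason: "UnsupportedProtocol", // hypothetical reason set by a cloud controller
		}},
	}
	fmt.Println("ports error:", hasCondition(st, corev1.LoadBalancerPortsError))
}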
@ -3971,10 +3982,21 @@ type LoadBalancerIngress struct {
// (typically AWS load-balancers) // (typically AWS load-balancers)
// +optional // +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
// Ports is a list of records of service ports
// If used, every port defined in the service should have an entry in it
// +listType=atomic
// +optional
Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"`
} }
const (
// MaxServiceTopologyKeys is the largest number of topology keys allowed on a service
MaxServiceTopologyKeys = 16
)
// IPFamily represents the IP Family (IPv4 or IPv6). This type is used // IPFamily represents the IP Family (IPv4 or IPv6). This type is used
// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) // to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
type IPFamily string type IPFamily string
const ( const (
@ -3982,8 +4004,29 @@ const (
IPv4Protocol IPFamily = "IPv4" IPv4Protocol IPFamily = "IPv4"
// IPv6Protocol indicates that this IP is IPv6 protocol // IPv6Protocol indicates that this IP is IPv6 protocol
IPv6Protocol IPFamily = "IPv6" IPv6Protocol IPFamily = "IPv6"
// MaxServiceTopologyKeys is the largest number of topology keys allowed on a service )
MaxServiceTopologyKeys = 16
// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service
type IPFamilyPolicyType string
const (
// IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily.
// The IPFamily assigned is based on the default IPFamily used by the cluster
// or as identified by service.spec.ipFamilies field
IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack"
// IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when
// the cluster is configured for dual-stack. If the cluster is not configured
// for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not
// set in service.spec.ipFamilies then the service will be assigned the default IPFamily
// configured on the cluster
IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack"
// IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using
// IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The
// IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If
// service.spec.ipFamilies was not provided then it will be assigned according to how they are
// configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative
// IPFamily will be added by apiserver
IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack"
) )
// ServiceSpec describes the attributes that a user creates on a service. // ServiceSpec describes the attributes that a user creates on a service.
@ -4007,30 +4050,68 @@ type ServiceSpec struct {
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// clusterIP is the IP address of the service and is usually assigned // clusterIP is the IP address of the service and is usually assigned
// randomly by the master. If an address is specified manually and is not in // randomly. If an address is specified manually, is in-range (as per
// use by others, it will be allocated to the service; otherwise, creation // system configuration), and is not in use, it will be allocated to the
// of the service will fail. This field can not be changed through updates. // service; otherwise creation of the service will fail. This field may not
// Valid values are "None", empty string (""), or a valid IP address. "None" // be changed through updates unless the type field is also being changed
// can be specified for headless services when proxying is not required. // to ExternalName (which requires this field to be blank) or the type
// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if // field is being changed from ExternalName (in which case this field may
// type is ExternalName. // optionally be specified, as describe above). Valid values are "None",
// empty string (""), or a valid IP address. Setting this to "None" makes a
// "headless service" (no virtual IP), which is useful when direct endpoint
// connections are preferred and proxying is not required. Only applies to
// types ClusterIP, NodePort, and LoadBalancer. If this field is specified
// when creating a Service of type ExternalName, creation will fail. This
// field will be wiped when updating a Service to type ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +optional // +optional
ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
// ClusterIPs is a list of IP addresses assigned to this service, and are
// usually assigned randomly. If an address is specified manually, is
// in-range (as per system configuration), and is not in use, it will be
// allocated to the service; otherwise creation of the service will fail.
// This field may not be changed through updates unless the type field is
// also being changed to ExternalName (which requires this field to be
// empty) or the type field is being changed from ExternalName (in which
// case this field may optionally be specified, as describe above). Valid
// values are "None", empty string (""), or a valid IP address. Setting
// this to "None" makes a "headless service" (no virtual IP), which is
// useful when direct endpoint connections are preferred and proxying is
// not required. Only applies to types ClusterIP, NodePort, and
// LoadBalancer. If this field is specified when creating a Service of type
// ExternalName, creation will fail. This field will be wiped when updating
// a Service to type ExternalName. If this field is not specified, it will
// be initialized from the clusterIP field. If this field is specified,
// clients must ensure that clusterIPs[0] and clusterIP have the same
// value.
//
// Unless the "IPv6DualStack" feature gate is enabled, this field is
// limited to one value, which must be the same as the clusterIP field. If
// the feature gate is enabled, this field may hold a maximum of two
// entries (dual-stack IPs, in either order). These IPs must correspond to
// the values of the ipFamilies field. Both clusterIPs and ipFamilies are
// governed by the ipFamilyPolicy field.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +listType=atomic
// +optional
ClusterIPs []string `json:"clusterIPs,omitempty" protobuf:"bytes,18,opt,name=clusterIPs"`
// type determines how the Service is exposed. Defaults to ClusterIP. Valid // type determines how the Service is exposed. Defaults to ClusterIP. Valid
// options are ExternalName, ClusterIP, NodePort, and LoadBalancer. // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
// "ExternalName" maps to the specified externalName. // "ClusterIP" allocates a cluster-internal IP address for load-balancing
// "ClusterIP" allocates a cluster-internal IP address for load-balancing to // to endpoints. Endpoints are determined by the selector or if that is not
// endpoints. Endpoints are determined by the selector or if that is not // specified, by manual construction of an Endpoints object or
// specified, by manual construction of an Endpoints object. If clusterIP is // EndpointSlice objects. If clusterIP is "None", no virtual IP is
// "None", no virtual IP is allocated and the endpoints are published as a // allocated and the endpoints are published as a set of endpoints rather
// set of endpoints rather than a stable IP. // than a virtual IP.
// "NodePort" builds on ClusterIP and allocates a port on every node which // "NodePort" builds on ClusterIP and allocates a port on every node which
// routes to the clusterIP. // routes to the same endpoints as the clusterIP.
// "LoadBalancer" builds on NodePort and creates an // "LoadBalancer" builds on NodePort and creates an external load-balancer
// external load-balancer (if supported in the current cloud) which routes // (if supported in the current cloud) which routes to the same endpoints
// to the clusterIP. // as the clusterIP.
// "ExternalName" aliases this service to the specified externalName.
// Several other fields do not apply to ExternalName services.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
// +optional // +optional
Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
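To make the clusterIP and type semantics above concrete, a sketch of a headless service next to a plain ClusterIP one; the selector and port values are placeholders.

package main

import corev1 "k8s.io/api/core/v1"

func main() {
	// A headless service: clusterIP "None" means no virtual IP is allocated
	// and endpoints are published directly (useful for StatefulSet peers).
	_ = corev1.ServiceSpec{
		Selector:  map[string]string{"app": "db"}, // hypothetical selector
		ClusterIP: corev1.ClusterIPNone,
		Ports:     []corev1.ServicePort{{Name: "peer", Port: 5432}},
	}

	// A conventional ClusterIP service lets the apiserver pick the virtual IP.
	_ = corev1.ServiceSpec{
		Selector: map[string]string{"app": "db"},
		Type:     corev1.ServiceTypeClusterIP,
		Ports:    []corev1.ServicePort{{Name: "client", Port: 5432}},
	}
}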
@ -4066,10 +4147,10 @@ type ServiceSpec struct {
// +optional // +optional
LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
// externalName is the external reference that kubedns or equivalent will // externalName is the external reference that discovery mechanisms will
// return as a CNAME record for this service. No proxying will be involved. // return as an alias for this service (e.g. a DNS CNAME record). No
// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) // proxying will be involved. Must be a lowercase RFC-1123 hostname
// and requires Type to be ExternalName. // (https://tools.ietf.org/html/rfc1123) and requires Type to be
// +optional // +optional
ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
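A minimal ExternalName sketch matching the description above; the hostname is hypothetical.

package main

import corev1 "k8s.io/api/core/v1"

func main() {
	// Alias the in-cluster service name to an external DNS name via a
	// CNAME-style record; no ports, selector, or cluster IP are involved.
	_ = corev1.ServiceSpec{
		Type:         corev1.ServiceTypeExternalName,
		ExternalName: "db.prod.example.com", // hypothetical external hostname
	}
}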
@ -4083,10 +4164,14 @@ type ServiceSpec struct {
ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
// healthCheckNodePort specifies the healthcheck nodePort for the service. // healthCheckNodePort specifies the healthcheck nodePort for the service.
// If not specified, HealthCheckNodePort is created by the service api // This only applies when type is set to LoadBalancer and
// backend with the allocated nodePort. Will use user-specified nodePort value // externalTrafficPolicy is set to Local. If a value is specified, is
// if specified by the client. Only effects when Type is set to LoadBalancer // in-range, and is not in use, it will be used. If not specified, a value
// and ExternalTrafficPolicy is set to Local. // will be automatically allocated. External systems (e.g. load-balancers)
// can use this port to determine if a given node holds endpoints for this
// service or not. If this field is specified when creating a Service
// which does not need it, creation will fail. This field will be wiped
// when updating a Service to no longer need it (e.g. changing type).
// +optional // +optional
HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
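A sketch of a LoadBalancer service using externalTrafficPolicy=Local as described above; the apiserver allocates healthCheckNodePort automatically unless an explicit (hypothetical) value is requested.

package main

import corev1 "k8s.io/api/core/v1"

func main() {
	// With externalTrafficPolicy=Local the client source IP is preserved and
	// external load-balancers use healthCheckNodePort to find nodes that
	// actually hold endpoints for this service.
	_ = corev1.ServiceSpec{
		Type:                  corev1.ServiceTypeLoadBalancer,
		ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal,
		Selector:              map[string]string{"app": "web"}, // hypothetical selector
		Ports:                 []corev1.ServicePort{{Port: 443}},
		// HealthCheckNodePort: 32100, // optional explicit value (hypothetical, must be in range and free)
	}
}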
@ -4105,24 +4190,6 @@ type ServiceSpec struct {
// +optional // +optional
SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
// ipFamily specifies whether this Service has a preference for a particular IP family (e.g.
// IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster,
// you can specify ipFamily when creating a ClusterIP Service to determine whether the
// controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when
// creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In
// either case, if you do not specify an ipFamily explicitly, it will default to the
// cluster's primary IP family.
// This field is part of an alpha feature, and you should not make any assumptions about its
// semantics other than those described above. In particular, you should not assume that it
// can (or cannot) be changed after creation time; that it can only have the values "IPv4"
// and "IPv6"; or that its current value on a given Service correctly reflects the current
// state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service
// is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in
// the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an
// irrelevant value anyway.)
// +optional
IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`
// topologyKeys is a preference-order list of topology keys which // topologyKeys is a preference-order list of topology keys which
// implementations of services should use to preferentially sort endpoints // implementations of services should use to preferentially sort endpoints
// when accessing this Service, it can not be used at the same time as // when accessing this Service, it can not be used at the same time as
@ -4135,8 +4202,54 @@ type ServiceSpec struct {
// The special value "*" may be used to mean "any topology". This catch-all // The special value "*" may be used to mean "any topology". This catch-all
// value, if used, only makes sense as the last value in the list. // value, if used, only makes sense as the last value in the list.
// If this is not specified or empty, no topology constraints will be applied. // If this is not specified or empty, no topology constraints will be applied.
// This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.
// +optional // +optional
TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
// IPFamily is tombstoned to show why 15 is a reserved protobuf tag.
// IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`
// IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
// service, and is gated by the "IPv6DualStack" feature gate. This field
// is usually assigned automatically based on cluster configuration and the
// ipFamilyPolicy field. If this field is specified manually, the requested
// family is available in the cluster, and ipFamilyPolicy allows it, it
// will be used; otherwise creation of the service will fail. This field
// is conditionally mutable: it allows for adding or removing a secondary
// IP family, but it does not allow changing the primary IP family of the
// Service. Valid values are "IPv4" and "IPv6". This field only applies
// to Services of types ClusterIP, NodePort, and LoadBalancer, and does
// apply to "headless" services. This field will be wiped when updating a
// Service to type ExternalName.
//
// This field may hold a maximum of two entries (dual-stack families, in
// either order). These families must correspond to the values of the
// clusterIPs field, if specified. Both clusterIPs and ipFamilies are
// governed by the ipFamilyPolicy field.
// +listType=atomic
// +optional
IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"`
// IPFamilyPolicy represents the dual-stack-ness requested or required by
// this Service, and is gated by the "IPv6DualStack" feature gate. If
// there is no value provided, then this field will be set to SingleStack.
// Services can be "SingleStack" (a single IP family), "PreferDualStack"
// (two IP families on dual-stack configured clusters or a single IP family
// on single-stack clusters), or "RequireDualStack" (two IP families on
// dual-stack configured clusters, otherwise fail). The ipFamilies and
// clusterIPs fields depend on the value of this field. This field will be
// wiped when updating a service to type ExternalName.
// +optional
IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"`
// allocateLoadBalancerNodePorts defines if NodePorts will be automatically
// allocated for services with type LoadBalancer. Default is "true". It may be
// set to "false" if the cluster load-balancer does not rely on NodePorts.
// allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer
// and will be cleared if the type is changed to any other type.
// This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.
// +optional
AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"`
} }
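Putting the new dual-stack and NodePort-allocation fields together, a hedged sketch: the selector and port are placeholders, and these fields are only honored behind the IPv6DualStack and ServiceLBNodePortControl gates, as noted above.

package main

import corev1 "k8s.io/api/core/v1"

func main() {
	policy := corev1.IPFamilyPolicyPreferDualStack
	allocateNodePorts := false

	// Prefer dual-stack where the cluster supports it, pin the family order,
	// and skip NodePort allocation for a load-balancer that does not need it.
	_ = corev1.ServiceSpec{
		Type:                          corev1.ServiceTypeLoadBalancer,
		Selector:                      map[string]string{"app": "web"}, // hypothetical selector
		Ports:                         []corev1.ServicePort{{Port: 80}},
		IPFamilyPolicy:                &policy,
		IPFamilies:                    []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
		AllocateLoadBalancerNodePorts: &allocateNodePorts,
	}
}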
// ServicePort contains information on service's port. // ServicePort contains information on service's port.
@ -4179,10 +4292,14 @@ type ServicePort struct {
// +optional // +optional
TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
// The port on each node on which this service is exposed when type=NodePort or LoadBalancer. // The port on each node on which this service is exposed when type is
// Usually assigned by the system. If specified, it will be allocated to the service // NodePort or LoadBalancer. Usually assigned by the system. If a value is
// if unused or else creation of the service will fail. // specified, in-range, and not in use it will be used, otherwise the
// Default is to auto-allocate a port if the ServiceType of this Service requires one. // operation will fail. If not specified, a port will be allocated if this
// Service requires one. If this field is specified when creating a
// Service which does not need it, creation will fail. This field will be
// wiped when updating a Service to no longer need it (e.g. changing type
// from NodePort to ClusterIP).
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
// +optional // +optional
NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
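A short sketch of the nodePort semantics above; the explicit 30443 value is hypothetical and must be free and in range, otherwise creation fails.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Requesting a specific nodePort: it is used only if in range and unused.
	// Leaving NodePort zero lets the apiserver allocate one when the Service
	// type requires it.
	_ = corev1.ServicePort{
		Name:       "https",
		Protocol:   corev1.ProtocolTCP,
		Port:       443,
		TargetPort: intstr.FromInt(8443),
		NodePort:   30443, // hypothetical explicit node port
	}
}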
@ -5275,7 +5392,12 @@ const (
// +genclient // +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Event is a report of an event somewhere in the cluster. // Event is a report of an event somewhere in the cluster. Events
// have a limited retention time and triggers and messages may evolve
// with time. Event consumers should not rely on the timing of an event
// with a given Reason reflecting a consistent underlying trigger, or the
// continued existence of events with that Reason. Events should be
// treated as informative, best-effort, supplemental data.
type Event struct { type Event struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`
// Standard object's metadata. // Standard object's metadata.
@ -6104,3 +6226,26 @@ const (
// and data streams for a single forwarded connection // and data streams for a single forwarded connection
PortForwardRequestIDHeader = "requestID" PortForwardRequestIDHeader = "requestID"
) )
// PortStatus represents the error condition of a service port
type PortStatus struct {
// Port is the port number of the service port of which status is recorded here
Port int32 `json:"port" protobuf:"varint,1,opt,name=port"`
// Protocol is the protocol of the service port of which status is recorded here
// The supported values are: "TCP", "UDP", "SCTP"
Protocol Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
// Error is to record the problem with the service port
// The format of the error shall comply with the following rules:
// - built-in error values shall be specified in this file and those shall use
// CamelCase names
// - cloud provider specific error values must have names that comply with the
// format foo.example.com/CamelCase.
// ---
// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
// +optional
// +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
// +kubebuilder:validation:MaxLength=316
Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
}
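A sketch of reading the new per-port error reporting; the IP is from the documentation range and the helper below is illustrative.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportPortErrors prints any per-port error recorded by the cloud
// load-balancer controller in the Service status.
func reportPortErrors(svc *corev1.Service) {
	for _, ing := range svc.Status.LoadBalancer.Ingress {
		for _, p := range ing.Ports {
			if p.Error != nil {
				fmt.Printf("%s/%d on %s%s: %s\n", p.Protocol, p.Port, ing.IP, ing.Hostname, *p.Error)
			}
		}
	}
}

func main() {
	errMsg := corev1.LoadBalancerPortsError // built-in error value from this file
	reportPortErrors(&corev1.Service{
		Status: corev1.ServiceStatus{
			LoadBalancer: corev1.LoadBalancerStatus{
				Ingress: []corev1.LoadBalancerIngress{{
					IP:    "203.0.113.10", // documentation-range IP
					Ports: []corev1.PortStatus{{Port: 443, Protocol: corev1.ProtocolSCTP, Error: &errMsg}},
				}},
			},
		},
	})
}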


@ -339,7 +339,7 @@ var map_Container = map[string]string{
"volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.",
"livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
"terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
"terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
@ -637,7 +637,7 @@ func (EphemeralVolumeSource) SwaggerDoc() map[string]string {
} }
var map_Event = map[string]string{ var map_Event = map[string]string{
"": "Event is a report of an event somewhere in the cluster.", "": "Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"involvedObject": "The object that this event is about.", "involvedObject": "The object that this event is about.",
"reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.",
@ -953,6 +953,7 @@ var map_LoadBalancerIngress = map[string]string{
"": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
"ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
"hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)",
"ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it",
} }
func (LoadBalancerIngress) SwaggerDoc() map[string]string { func (LoadBalancerIngress) SwaggerDoc() map[string]string {
@ -1310,7 +1311,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
"volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
"storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
"dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.",
} }
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
@ -1602,7 +1603,7 @@ var map_PodSecurityContext = map[string]string{
"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.",
"fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
"sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".", "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.",
"seccompProfile": "The seccomp options to use by the containers in this pod.", "seccompProfile": "The seccomp options to use by the containers in this pod.",
} }
@ -1723,6 +1724,16 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string {
return map_PodTemplateSpec return map_PodTemplateSpec
} }
var map_PortStatus = map[string]string{
"port": "Port is the port number of the service port of which status is recorded here",
"protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"",
"error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.",
}
func (PortStatus) SwaggerDoc() map[string]string {
return map_PortStatus
}
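For completeness, the generated SwaggerDoc() maps can also be read directly — a tiny sketch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// The generated SwaggerDoc() maps feed field documentation into the
	// published OpenAPI schema.
	docs := corev1.PortStatus{}.SwaggerDoc()
	fmt.Println(docs["protocol"])
}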
var map_PortworxVolumeSource = map[string]string{ var map_PortworxVolumeSource = map[string]string{
"": "PortworxVolumeSource represents a Portworx volume resource.", "": "PortworxVolumeSource represents a Portworx volume resource.",
"volumeID": "VolumeID uniquely identifies a Portworx volume", "volumeID": "VolumeID uniquely identifies a Portworx volume",
@ -2209,7 +2220,7 @@ var map_ServicePort = map[string]string{
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.", "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.",
"port": "The port that will be exposed by this service.", "port": "The port that will be exposed by this service.",
"targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
"nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
} }
func (ServicePort) SwaggerDoc() map[string]string { func (ServicePort) SwaggerDoc() map[string]string {
@ -2229,19 +2240,22 @@ var map_ServiceSpec = map[string]string{
"": "ServiceSpec describes the attributes that a user creates on a service.", "": "ServiceSpec describes the attributes that a user creates on a service.",
"ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
"selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/",
"clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
"type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "clusterIPs": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nUnless the \"IPv6DualStack\" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
"type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types",
"externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.",
"sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
"loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
"loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/",
"externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", "externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be",
"externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.",
"healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).",
"publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.",
"sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.",
"ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6) when the IPv6DualStack feature gate is enabled. In a dual-stack cluster, you can specify ipFamily when creating a ClusterIP Service to determine whether the controller will allocate an IPv4 or IPv6 IP for it, and you can specify ipFamily when creating a headless Service to determine whether it will have IPv4 or IPv6 Endpoints. In either case, if you do not specify an ipFamily explicitly, it will default to the cluster's primary IP family. This field is part of an alpha feature, and you should not make any assumptions about its semantics other than those described above. In particular, you should not assume that it can (or cannot) be changed after creation time; that it can only have the values \"IPv4\" and \"IPv6\"; or that its current value on a given Service correctly reflects the current state of that Service. (For ClusterIP Services, look at clusterIP to see if the Service is IPv4 or IPv6. For headless Services, look at the endpoints, which may be dual-stack in the future. For ExternalName Services, ipFamily has no meaning, but it may be set to an irrelevant value anyway.)", "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.",
"topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.", "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.",
"ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \"IPv6DualStack\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.",
"allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.",
} }
func (ServiceSpec) SwaggerDoc() map[string]string { func (ServiceSpec) SwaggerDoc() map[string]string {
@ -2251,6 +2265,7 @@ func (ServiceSpec) SwaggerDoc() map[string]string {
var map_ServiceStatus = map[string]string{ var map_ServiceStatus = map[string]string{
"": "ServiceStatus represents the current status of a service.", "": "ServiceStatus represents the current status of a service.",
"loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.",
"conditions": "Current service state",
} }
func (ServiceStatus) SwaggerDoc() map[string]string { func (ServiceStatus) SwaggerDoc() map[string]string {
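
Not part of the commit: a minimal sketch, assuming the vendored k8s.io/api v0.20.x and k8s.io/apimachinery packages, of how the dual-stack Service fields documented above (ipFamilies, ipFamilyPolicy, allocateLoadBalancerNodePorts) are populated from Go. As the swagger docs note, these values only take effect on clusters with the IPv6DualStack and ServiceLBNodePortControl feature gates enabled.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    policy := corev1.IPFamilyPolicyPreferDualStack
    allocateNodePorts := false
    svc := corev1.Service{
        ObjectMeta: metav1.ObjectMeta{Name: "demo"},
        Spec: corev1.ServiceSpec{
            Type:     corev1.ServiceTypeLoadBalancer,
            Selector: map[string]string{"app": "demo"},
            Ports:    []corev1.ServicePort{{Name: "http", Port: 80}},
            // New in v1.20: request both families, prefer dual-stack, skip NodePort allocation.
            IPFamilies:                    []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
            IPFamilyPolicy:                &policy,
            AllocateLoadBalancerNodePorts: &allocateNodePorts,
        },
    }
    fmt.Println(svc.Spec.IPFamilies, *svc.Spec.IPFamilyPolicy)
}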

View File

@ -19,10 +19,16 @@ package v1
const ( const (
LabelHostname = "kubernetes.io/hostname" LabelHostname = "kubernetes.io/hostname"
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" LabelFailureDomainBetaZone = "failure-domain.beta.kubernetes.io/zone"
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" LabelFailureDomainBetaRegion = "failure-domain.beta.kubernetes.io/region"
LabelZoneFailureDomainStable = "topology.kubernetes.io/zone" LabelTopologyZone = "topology.kubernetes.io/zone"
LabelZoneRegionStable = "topology.kubernetes.io/region" LabelTopologyRegion = "topology.kubernetes.io/region"
// Legacy names for compat.
LabelZoneFailureDomain = LabelFailureDomainBetaZone // deprecated, remove after 1.20
LabelZoneRegion = LabelFailureDomainBetaRegion // deprecated, remove after 1.20
LabelZoneFailureDomainStable = LabelTopologyZone
LabelZoneRegionStable = LabelTopologyRegion
LabelInstanceType = "beta.kubernetes.io/instance-type" LabelInstanceType = "beta.kubernetes.io/instance-type"
LabelInstanceTypeStable = "node.kubernetes.io/instance-type" LabelInstanceTypeStable = "node.kubernetes.io/instance-type"
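
Illustrative only (not in the diff): the renamed topology constants and the legacy aliases kept above resolve to the same strings, so existing selectors keep matching. This assumes the vendored k8s.io/api/core/v1 package.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func main() {
    fmt.Println(corev1.LabelTopologyZone)                                        // topology.kubernetes.io/zone
    fmt.Println(corev1.LabelZoneFailureDomainStable == corev1.LabelTopologyZone) // true: legacy name is an alias
    fmt.Println(corev1.LabelFailureDomainBetaZone)                               // failure-domain.beta.kubernetes.io/zone
}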

View File

@ -2144,6 +2144,13 @@ func (in *List) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
*out = *in *out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]PortStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return return
} }
@ -2163,7 +2170,9 @@ func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
if in.Ingress != nil { if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress in, out := &in.Ingress, &out.Ingress
*out = make([]LoadBalancerIngress, len(*in)) *out = make([]LoadBalancerIngress, len(*in))
copy(*out, *in) for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
} }
return return
} }
@ -4079,6 +4088,27 @@ func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortStatus) DeepCopyInto(out *PortStatus) {
*out = *in
if in.Error != nil {
in, out := &in.Error, &out.Error
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus.
func (in *PortStatus) DeepCopy() *PortStatus {
if in == nil {
return nil
}
out := new(PortStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
*out = *in *out = *in
@ -5270,6 +5300,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
(*out)[key] = val (*out)[key] = val
} }
} }
if in.ClusterIPs != nil {
in, out := &in.ClusterIPs, &out.ClusterIPs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ExternalIPs != nil { if in.ExternalIPs != nil {
in, out := &in.ExternalIPs, &out.ExternalIPs in, out := &in.ExternalIPs, &out.ExternalIPs
*out = make([]string, len(*in)) *out = make([]string, len(*in))
@ -5285,16 +5320,26 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
*out = new(SessionAffinityConfig) *out = new(SessionAffinityConfig)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
if in.IPFamily != nil {
in, out := &in.IPFamily, &out.IPFamily
*out = new(IPFamily)
**out = **in
}
if in.TopologyKeys != nil { if in.TopologyKeys != nil {
in, out := &in.TopologyKeys, &out.TopologyKeys in, out := &in.TopologyKeys, &out.TopologyKeys
*out = make([]string, len(*in)) *out = make([]string, len(*in))
copy(*out, *in) copy(*out, *in)
} }
if in.IPFamilies != nil {
in, out := &in.IPFamilies, &out.IPFamilies
*out = make([]IPFamily, len(*in))
copy(*out, *in)
}
if in.IPFamilyPolicy != nil {
in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy
*out = new(IPFamilyPolicyType)
**out = **in
}
if in.AllocateLoadBalancerNodePorts != nil {
in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts
*out = new(bool)
**out = **in
}
return return
} }
@ -5312,6 +5357,13 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec {
func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) {
*out = *in *out = *in
in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return return
} }
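
A small sanity sketch (assumed usage, not from the commit) of why the regenerated deepcopy bodies above matter: the new pointer and slice fields (LoadBalancerIngress.Ports, PortStatus.Error, ServiceStatus.Conditions) are copied element by element, so mutating a copy no longer leaks back into the original.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    portErr := "ListenerNotReady"
    orig := corev1.ServiceStatus{
        LoadBalancer: corev1.LoadBalancerStatus{
            Ingress: []corev1.LoadBalancerIngress{{
                IP:    "192.0.2.10",
                Ports: []corev1.PortStatus{{Port: 80, Protocol: corev1.ProtocolTCP, Error: &portErr}},
            }},
        },
        Conditions: []metav1.Condition{{Type: "Ready", Status: metav1.ConditionTrue}},
    }

    copied := orig.DeepCopy()
    *copied.LoadBalancer.Ingress[0].Ports[0].Error = "changed"

    fmt.Println(*orig.LoadBalancer.Ingress[0].Ports[0].Error) // still "ListenerNotReady"
}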

View File

@ -17,7 +17,7 @@ limitations under the License.
// This file was autogenerated by go-to-protobuf. Do not edit it manually! // This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2'; syntax = "proto2";
package k8s.io.apimachinery.pkg.api.resource; package k8s.io.apimachinery.pkg.api.resource;

View File

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"math"
"math/big" "math/big"
"strconv" "strconv"
"strings" "strings"
@ -120,7 +121,7 @@ const (
) )
// MustParse turns the given string into a quantity or panics; for tests // MustParse turns the given string into a quantity or panics; for tests
// or others cases where you know the string is valid. // or other cases where you know the string is valid.
func MustParse(str string) Quantity { func MustParse(str string) Quantity {
q, err := ParseQuantity(str) q, err := ParseQuantity(str)
if err != nil { if err != nil {
@ -442,6 +443,36 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
} }
} }
// AsApproximateFloat64 returns a float64 representation of the quantity which may
// lose precision. If the value of the quantity is outside the range of a float64
// +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
if q.d.Dec != nil {
base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64()
exponent = int(-q.d.Dec.Scale())
} else {
base = float64(q.i.value)
exponent = int(q.i.scale)
}
if exponent == 0 {
return base
}
// multiply by the appropriate exponential scale
switch q.Format {
case DecimalExponent, DecimalSI:
return base * math.Pow10(exponent)
default:
// fast path for exponents that can fit in 64 bits
if exponent > 0 && exponent < 7 {
return base * float64(int64(1)<<(exponent*10))
}
return base * math.Pow(2, float64(exponent*10))
}
}
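
A hedged usage sketch for the AsApproximateFloat64 helper added above, assuming the vendored k8s.io/apimachinery/pkg/api/resource package; as the doc comment says, the result may lose precision for very large or very fine-grained quantities.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    mem := resource.MustParse("1536Mi") // binary suffix
    cpu := resource.MustParse("250m")   // decimal suffix
    fmt.Println(mem.AsApproximateFloat64()) // 1.610612736e+09
    fmt.Println(cpu.AsApproximateFloat64()) // 0.25
}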
// AsInt64 returns a representation of the current value as an int64 if a fast conversion // AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity. // is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) { func (q *Quantity) AsInt64() (int64, bool) {
@ -598,6 +629,9 @@ const int64QuantityExpectedBytes = 18
// String is an expensive operation and caching this result significantly reduces the cost of // String is an expensive operation and caching this result significantly reduces the cost of
// normal parse / marshal operations on Quantity. // normal parse / marshal operations on Quantity.
func (q *Quantity) String() string { func (q *Quantity) String() string {
if q == nil {
return "<nil>"
}
if len(q.s) == 0 { if len(q.s) == 0 {
result := make([]byte, 0, int64QuantityExpectedBytes) result := make([]byte, 0, int64QuantityExpectedBytes)
number, suffix := q.CanonicalizeBytes(result) number, suffix := q.CanonicalizeBytes(result)

View File

@ -26,6 +26,5 @@ reviewers:
- mml - mml
- mbohlool - mbohlool
- therc - therc
- mqliang
- kevin-wangzefeng - kevin-wangzefeng
- jianhuiz - jianhuiz

View File

@ -17,7 +17,7 @@ limitations under the License.
// This file was autogenerated by go-to-protobuf. Do not edit it manually! // This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2'; syntax = "proto2";
package k8s.io.apimachinery.pkg.apis.meta.v1; package k8s.io.apimachinery.pkg.apis.meta.v1;
@ -375,6 +375,7 @@ message GroupVersionResource {
// A label selector is a label query over a set of resources. The result of matchLabels and // A label selector is a label query over a set of resources. The result of matchLabels and
// matchExpressions are ANDed. An empty label selector matches all objects. A null // matchExpressions are ANDed. An empty label selector matches all objects. A null
// label selector matches no objects. // label selector matches no objects.
// +structType=atomic
message LabelSelector { message LabelSelector {
// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
// map is equivalent to an element of matchExpressions, whose key field is "key", the // map is equivalent to an element of matchExpressions, whose key field is "key", the

View File

@ -34,6 +34,9 @@ type GroupResource struct {
} }
func (gr *GroupResource) String() string { func (gr *GroupResource) String() string {
if gr == nil {
return "<nil>"
}
if len(gr.Group) == 0 { if len(gr.Group) == 0 {
return gr.Resource return gr.Resource
} }
@ -51,6 +54,9 @@ type GroupVersionResource struct {
} }
func (gvr *GroupVersionResource) String() string { func (gvr *GroupVersionResource) String() string {
if gvr == nil {
return "<nil>"
}
return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "")
} }
@ -64,6 +70,9 @@ type GroupKind struct {
} }
func (gk *GroupKind) String() string { func (gk *GroupKind) String() string {
if gk == nil {
return "<nil>"
}
if len(gk.Group) == 0 { if len(gk.Group) == 0 {
return gk.Kind return gk.Kind
} }
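
Not from the commit: a tiny sketch of what the added nil guards buy, assuming these hunks belong to metav1's group_version.go; calling String on a nil receiver now prints "<nil>" instead of panicking.

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    var nilGVR *metav1.GroupVersionResource
    fmt.Println(nilGVR.String()) // "<nil>", no panic

    gvr := metav1.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
    fmt.Println(gvr.String()) // "apps/v1, Resource=deployments"
}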

View File

@ -201,6 +201,20 @@ func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) {
obj.Annotations[ann] = value obj.Annotations[ann] = value
} }
// HasLabel returns a bool if passed in label exists
func HasLabel(obj ObjectMeta, label string) bool {
_, found := obj.Labels[label]
return found
}
// SetMetaDataLabel sets the label and value
func SetMetaDataLabel(obj *ObjectMeta, label string, value string) {
if obj.Labels == nil {
obj.Labels = make(map[string]string)
}
obj.Labels[label] = value
}
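
Assumed usage (not in the diff) of the two helpers added above in k8s.io/apimachinery/pkg/apis/meta/v1: SetMetaDataLabel lazily allocates the Labels map, and HasLabel only reports key presence.

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    var meta metav1.ObjectMeta // Labels is nil here
    metav1.SetMetaDataLabel(&meta, "app", "demo")

    fmt.Println(metav1.HasLabel(meta, "app"))     // true
    fmt.Println(metav1.HasLabel(meta, "release")) // false
}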
// SingleObject returns a ListOptions for watching a single object. // SingleObject returns a ListOptions for watching a single object.
func SingleObject(meta ObjectMeta) ListOptions { func SingleObject(meta ObjectMeta) ListOptions {
return ListOptions{ return ListOptions{

View File

@ -19,8 +19,6 @@ package v1
import ( import (
"encoding/json" "encoding/json"
"time" "time"
"github.com/google/gofuzz"
) )
const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
@ -181,16 +179,3 @@ func (t MicroTime) MarshalQueryParameter() (string, error) {
return t.UTC().Format(RFC3339Micro), nil return t.UTC().Format(RFC3339Micro), nil
} }
// Fuzz satisfies fuzz.Interface.
func (t *MicroTime) Fuzz(c fuzz.Continue) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Accurate to a tenth of
// micro second. Leave off nanoseconds because JSON doesn't
// represent them so they can't round-trip properly.
t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000))
}
var _ fuzz.Interface = &MicroTime{}

View File

@ -0,0 +1,39 @@
// +build !notest
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
fuzz "github.com/google/gofuzz"
)
// Fuzz satisfies fuzz.Interface.
func (t *MicroTime) Fuzz(c fuzz.Continue) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Accurate to a tenth of
// micro second. Leave off nanoseconds because JSON doesn't
// represent them so they can't round-trip properly.
t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000))
}
// ensure MicroTime implements fuzz.Interface
var _ fuzz.Interface = &MicroTime{}

View File

@ -19,8 +19,6 @@ package v1
import ( import (
"encoding/json" "encoding/json"
"time" "time"
fuzz "github.com/google/gofuzz"
) )
// Time is a wrapper around time.Time which supports correct // Time is a wrapper around time.Time which supports correct
@ -182,16 +180,3 @@ func (t Time) MarshalQueryParameter() (string, error) {
return t.UTC().Format(time.RFC3339), nil return t.UTC().Format(time.RFC3339), nil
} }
// Fuzz satisfies fuzz.Interface.
func (t *Time) Fuzz(c fuzz.Continue) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Leave off nanoseconds
// because JSON doesn't represent them so they can't round-trip
// properly.
t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
}
var _ fuzz.Interface = &Time{}

View File

@ -0,0 +1,39 @@
// +build !notest
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
fuzz "github.com/google/gofuzz"
)
// Fuzz satisfies fuzz.Interface.
func (t *Time) Fuzz(c fuzz.Continue) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Leave off nanoseconds
// because JSON doesn't represent them so they can't round-trip
// properly.
t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
}
// ensure Time implements fuzz.Interface
var _ fuzz.Interface = &Time{}
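
A speculative sketch of what the relocation above is for: with a default build the Fuzz methods still satisfy gofuzz's fuzz.Interface, while building with "-tags notest" compiles these files out so github.com/google/gofuzz can stay out of production binaries. The example assumes gofuzz delegates to types that implement fuzz.Interface.

package main

import (
    "fmt"

    fuzz "github.com/google/gofuzz"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    var t metav1.Time
    fuzz.New().Fuzz(&t) // dispatches to the Fuzz method defined under the !notest tag
    fmt.Println(t.UTC())
}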

View File

@ -1092,6 +1092,7 @@ type Patch struct{}
// A label selector is a label query over a set of resources. The result of matchLabels and // A label selector is a label query over a set of resources. The result of matchLabels and
// matchExpressions are ANDed. An empty label selector matches all objects. A null // matchExpressions are ANDed. An empty label selector matches all objects. A null
// label selector matches no objects. // label selector matches no objects.
// +structType=atomic
type LabelSelector struct { type LabelSelector struct {
// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
// map is equivalent to an element of matchExpressions, whose key field is "key", the // map is equivalent to an element of matchExpressions, whose key field is "key", the

View File

@ -282,14 +282,6 @@ func getNestedString(obj map[string]interface{}, fields ...string) string {
return val return val
} }
func getNestedInt64(obj map[string]interface{}, fields ...string) int64 {
val, found, err := NestedInt64(obj, fields...)
if !found || err != nil {
return 0
}
return val
}
func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 {
val, found, err := NestedInt64(obj, fields...) val, found, err := NestedInt64(obj, fields...)
if !found || err != nil { if !found || err != nil {

View File

@ -26,16 +26,6 @@ type typePair struct {
dest reflect.Type dest reflect.Type
} }
type typeNamePair struct {
fieldType reflect.Type
fieldName string
}
// DebugLogger allows you to get debugging messages if necessary.
type DebugLogger interface {
Logf(format string, args ...interface{})
}
type NameFunc func(t reflect.Type) string type NameFunc func(t reflect.Type) string
var DefaultNameFunc = func(t reflect.Type) string { return t.Name() } var DefaultNameFunc = func(t reflect.Type) string { return t.Name() }
@ -57,24 +47,6 @@ type Converter struct {
ignoredConversions map[typePair]struct{} ignoredConversions map[typePair]struct{}
ignoredUntypedConversions map[typePair]struct{} ignoredUntypedConversions map[typePair]struct{}
// This is a map from a source field type and name, to a list of destination
// field type and name.
structFieldDests map[typeNamePair][]typeNamePair
// Allows for the opposite lookup of structFieldDests. So that SourceFromDest
// copy flag also works. So this is a map of destination field name, to potential
// source field name and type to look for.
structFieldSources map[typeNamePair][]typeNamePair
// Map from an input type to a function which can apply a key name mapping
inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc
// Map from an input type to a set of default conversion flags.
inputDefaultFlags map[reflect.Type]FieldMatchingFlags
// If non-nil, will be called to print helpful debugging info. Quite verbose.
Debug DebugLogger
// nameFunc is called to retrieve the name of a type; this name is used for the // nameFunc is called to retrieve the name of a type; this name is used for the
// purpose of deciding whether two types match or not (i.e., will we attempt to // purpose of deciding whether two types match or not (i.e., will we attempt to
// do a conversion). The default returns the go type name. // do a conversion). The default returns the go type name.
@ -89,11 +61,6 @@ func NewConverter(nameFn NameFunc) *Converter {
ignoredConversions: make(map[typePair]struct{}), ignoredConversions: make(map[typePair]struct{}),
ignoredUntypedConversions: make(map[typePair]struct{}), ignoredUntypedConversions: make(map[typePair]struct{}),
nameFunc: nameFn, nameFunc: nameFn,
structFieldDests: make(map[typeNamePair][]typeNamePair),
structFieldSources: make(map[typeNamePair][]typeNamePair),
inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc),
inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags),
} }
c.RegisterUntypedConversionFunc( c.RegisterUntypedConversionFunc(
(*[]byte)(nil), (*[]byte)(nil), (*[]byte)(nil), (*[]byte)(nil),
@ -112,11 +79,9 @@ func (c *Converter) WithConversions(fns ConversionFuncs) *Converter {
return &copied return &copied
} }
// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type. // DefaultMeta returns meta for a given type.
func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) { func (c *Converter) DefaultMeta(t reflect.Type) *Meta {
return c.inputDefaultFlags[t], &Meta{ return &Meta{}
KeyNameMapping: c.inputFieldMappingFuncs[t],
}
} }
// Convert_Slice_byte_To_Slice_byte prevents recursing into every byte // Convert_Slice_byte_To_Slice_byte prevents recursing into every byte
@ -136,24 +101,12 @@ func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error {
type Scope interface { type Scope interface {
// Call Convert to convert sub-objects. Note that if you call it with your own exact // Call Convert to convert sub-objects. Note that if you call it with your own exact
// parameters, you'll run out of stack space before anything useful happens. // parameters, you'll run out of stack space before anything useful happens.
Convert(src, dest interface{}, flags FieldMatchingFlags) error Convert(src, dest interface{}) error
// SrcTags and DestTags contain the struct tags that src and dest had, respectively.
// If the enclosing object was not a struct, then these will contain no tags, of course.
SrcTag() reflect.StructTag
DestTag() reflect.StructTag
// Flags returns the flags with which the conversion was started.
Flags() FieldMatchingFlags
// Meta returns any information originally passed to Convert. // Meta returns any information originally passed to Convert.
Meta() *Meta Meta() *Meta
} }
// FieldMappingFunc can convert an input field value into different values, depending on
// the value of the source or destination struct tags.
type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string)
func NewConversionFuncs() ConversionFuncs { func NewConversionFuncs() ConversionFuncs {
return ConversionFuncs{ return ConversionFuncs{
untyped: make(map[typePair]ConversionFunc), untyped: make(map[typePair]ConversionFunc),
@ -194,9 +147,6 @@ func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs {
// Meta is supplied by Scheme, when it calls Convert. // Meta is supplied by Scheme, when it calls Convert.
type Meta struct { type Meta struct {
// KeyNameMapping is an optional function which may map the listed key (field name)
// into a source and destination value.
KeyNameMapping FieldMappingFunc
// Context is an optional field that callers may use to pass info to conversion functions. // Context is an optional field that callers may use to pass info to conversion functions.
Context interface{} Context interface{}
} }
@ -205,84 +155,11 @@ type Meta struct {
type scope struct { type scope struct {
converter *Converter converter *Converter
meta *Meta meta *Meta
flags FieldMatchingFlags
// srcStack & destStack are separate because they may not have a 1:1
// relationship.
srcStack scopeStack
destStack scopeStack
}
type scopeStackElem struct {
tag reflect.StructTag
value reflect.Value
key string
}
type scopeStack []scopeStackElem
func (s *scopeStack) pop() {
n := len(*s)
*s = (*s)[:n-1]
}
func (s *scopeStack) push(e scopeStackElem) {
*s = append(*s, e)
}
func (s *scopeStack) top() *scopeStackElem {
return &(*s)[len(*s)-1]
}
func (s scopeStack) describe() string {
desc := ""
if len(s) > 1 {
desc = "(" + s[1].value.Type().String() + ")"
}
for i, v := range s {
if i < 2 {
// First layer on stack is not real; second is handled specially above.
continue
}
if v.key == "" {
desc += fmt.Sprintf(".%v", v.value.Type())
} else {
desc += fmt.Sprintf(".%v", v.key)
}
}
return desc
}
// Formats src & dest as indices for printing.
func (s *scope) setIndices(src, dest int) {
s.srcStack.top().key = fmt.Sprintf("[%v]", src)
s.destStack.top().key = fmt.Sprintf("[%v]", dest)
}
// Formats src & dest as map keys for printing.
func (s *scope) setKeys(src, dest interface{}) {
s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src)
s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest)
} }
// Convert continues a conversion. // Convert continues a conversion.
func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { func (s *scope) Convert(src, dest interface{}) error {
return s.converter.Convert(src, dest, flags, s.meta) return s.converter.Convert(src, dest, s.meta)
}
// SrcTag returns the tag of the struct containing the current source item, if any.
func (s *scope) SrcTag() reflect.StructTag {
return s.srcStack.top().tag
}
// DestTag returns the tag of the struct containing the current dest item, if any.
func (s *scope) DestTag() reflect.StructTag {
return s.destStack.top().tag
}
// Flags returns the flags with which the current conversion was started.
func (s *scope) Flags() FieldMatchingFlags {
return s.flags
} }
// Meta returns the meta object that was originally passed to Convert. // Meta returns the meta object that was originally passed to Convert.
@ -290,50 +167,6 @@ func (s *scope) Meta() *Meta {
return s.meta return s.meta
} }
// describe prints the path to get to the current (source, dest) values.
func (s *scope) describe() (src, dest string) {
return s.srcStack.describe(), s.destStack.describe()
}
// error makes an error that includes information about where we were in the objects
// we were asked to convert.
func (s *scope) errorf(message string, args ...interface{}) error {
srcPath, destPath := s.describe()
where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath)
return fmt.Errorf(where+message, args...)
}
// Verifies whether a conversion function has a correct signature.
func verifyConversionFunctionSignature(ft reflect.Type) error {
if ft.Kind() != reflect.Func {
return fmt.Errorf("expected func, got: %v", ft)
}
if ft.NumIn() != 3 {
return fmt.Errorf("expected three 'in' params, got: %v", ft)
}
if ft.NumOut() != 1 {
return fmt.Errorf("expected one 'out' param, got: %v", ft)
}
if ft.In(0).Kind() != reflect.Ptr {
return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft)
}
if ft.In(1).Kind() != reflect.Ptr {
return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft)
}
scopeType := Scope(nil)
if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a {
return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft)
}
var forErrorType error
// This convolution is necessary, otherwise TypeOf picks up on the fact
// that forErrorType is nil.
errorType := reflect.TypeOf(&forErrorType).Elem()
if ft.Out(0) != errorType {
return fmt.Errorf("expected error return, got: %v", ft)
}
return nil
}
// RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those // RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those
// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
// any other guarantee. // any other guarantee.
@ -364,71 +197,16 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error {
return nil return nil
} }
// RegisterInputDefaults registers a field name mapping function, used when converting
// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping
// applied automatically if the input matches in. A set of default flags for the input conversion
// may also be provided, which will be used when no explicit flags are requested.
func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error {
fv := reflect.ValueOf(in)
ft := fv.Type()
if ft.Kind() != reflect.Ptr {
return fmt.Errorf("expected pointer 'in' argument, got: %v", ft)
}
c.inputFieldMappingFuncs[ft] = fn
c.inputDefaultFlags[ft] = defaultFlags
return nil
}
// FieldMatchingFlags contains a list of ways in which struct fields could be
// copied. These constants may be | combined.
type FieldMatchingFlags int
const (
// Loop through destination fields, search for matching source
// field to copy it from. Source fields with no corresponding
// destination field will be ignored. If SourceToDest is
// specified, this flag is ignored. If neither is specified,
// or no flags are passed, this flag is the default.
DestFromSource FieldMatchingFlags = 0
// Loop through source fields, search for matching dest field
// to copy it into. Destination fields with no corresponding
// source field will be ignored.
SourceToDest FieldMatchingFlags = 1 << iota
// Don't treat it as an error if the corresponding source or
// dest field can't be found.
IgnoreMissingFields
// Don't require type names to match.
AllowDifferentFieldTypeNames
)
// IsSet returns true if the given flag or combination of flags is set.
func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool {
if flag == DestFromSource {
// The bit logic doesn't work on the default value.
return f&SourceToDest != SourceToDest
}
return f&flag == flag
}
// Convert will translate src to dest if it knows how. Both must be pointers. // Convert will translate src to dest if it knows how. Both must be pointers.
// If no conversion func is registered and the default copying mechanism // If no conversion func is registered and the default copying mechanism
// doesn't work on this type pair, an error will be returned. // doesn't work on this type pair, an error will be returned.
// Read the comments on the various FieldMatchingFlags constants to understand
// what the 'flags' parameter does.
// 'meta' is given to allow you to pass information to conversion functions, // 'meta' is given to allow you to pass information to conversion functions,
// it is not used by Convert() other than storing it in the scope. // it is not used by Convert() other than storing it in the scope.
// Not safe for objects with cyclic references! // Not safe for objects with cyclic references!
func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { func (c *Converter) Convert(src, dest interface{}, meta *Meta) error {
return c.doConversion(src, dest, flags, meta, c.convert)
}
type conversionFunc func(sv, dv reflect.Value, scope *scope) error
func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error {
pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)} pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)}
scope := &scope{ scope := &scope{
converter: c, converter: c,
flags: flags,
meta: meta, meta: meta,
} }
@ -452,366 +230,4 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags
return err return err
} }
return fmt.Errorf("converting (%s) to (%s): unknown conversion", sv.Type(), dv.Type()) return fmt.Errorf("converting (%s) to (%s): unknown conversion", sv.Type(), dv.Type())
// TODO: Everything past this point is deprecated.
// Remove in 1.20 once we're sure it didn't break anything.
// Leave something on the stack, so that calls to struct tag getters never fail.
scope.srcStack.push(scopeStackElem{})
scope.destStack.push(scopeStackElem{})
return f(sv, dv, scope)
}
// callUntyped calls predefined conversion func.
func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error {
if !dv.CanAddr() {
return scope.errorf("cant addr dest")
}
var svPointer reflect.Value
if sv.CanAddr() {
svPointer = sv.Addr()
} else {
svPointer = reflect.New(sv.Type())
svPointer.Elem().Set(sv)
}
dvPointer := dv.Addr()
return f(svPointer.Interface(), dvPointer.Interface(), scope)
}
// convert recursively copies sv into dv, calling an appropriate conversion function if
// one is registered.
func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error {
dt, st := dv.Type(), sv.Type()
pair := typePair{st, dt}
// ignore conversions of this type
if _, ok := c.ignoredConversions[pair]; ok {
if c.Debug != nil {
c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt)
}
return nil
}
// Convert sv to dv.
pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())}
if f, ok := c.conversionFuncs.untyped[pair]; ok {
return c.callUntyped(sv, dv, f, scope)
}
if f, ok := c.generatedConversionFuncs.untyped[pair]; ok {
return c.callUntyped(sv, dv, f, scope)
}
if !dv.CanSet() {
return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)")
}
if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) {
return scope.errorf(
"type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.",
c.nameFunc(st), c.nameFunc(dt), st, dt)
}
switch st.Kind() {
case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct:
// Don't copy these via assignment/conversion!
default:
// This should handle all simple types.
if st.AssignableTo(dt) {
dv.Set(sv)
return nil
}
if st.ConvertibleTo(dt) {
dv.Set(sv.Convert(dt))
return nil
}
}
if c.Debug != nil {
c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt)
}
scope.srcStack.push(scopeStackElem{value: sv})
scope.destStack.push(scopeStackElem{value: dv})
defer scope.srcStack.pop()
defer scope.destStack.pop()
switch dv.Kind() {
case reflect.Struct:
return c.convertKV(toKVValue(sv), toKVValue(dv), scope)
case reflect.Slice:
if sv.IsNil() {
// Don't make a zero-length slice.
dv.Set(reflect.Zero(dt))
return nil
}
dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap()))
for i := 0; i < sv.Len(); i++ {
scope.setIndices(i, i)
if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil {
return err
}
}
case reflect.Ptr:
if sv.IsNil() {
// Don't copy a nil ptr!
dv.Set(reflect.Zero(dt))
return nil
}
dv.Set(reflect.New(dt.Elem()))
switch st.Kind() {
case reflect.Ptr, reflect.Interface:
return c.convert(sv.Elem(), dv.Elem(), scope)
default:
return c.convert(sv, dv.Elem(), scope)
}
case reflect.Map:
if sv.IsNil() {
// Don't copy a nil ptr!
dv.Set(reflect.Zero(dt))
return nil
}
dv.Set(reflect.MakeMap(dt))
for _, sk := range sv.MapKeys() {
dk := reflect.New(dt.Key()).Elem()
if err := c.convert(sk, dk, scope); err != nil {
return err
}
dkv := reflect.New(dt.Elem()).Elem()
scope.setKeys(sk.Interface(), dk.Interface())
// TODO: sv.MapIndex(sk) may return a value with CanAddr() == false,
// because a map[string]struct{} does not allow a pointer reference.
// Calling a custom conversion function defined for the map value
// will panic. Example is PodInfo map[string]ContainerStatus.
if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil {
return err
}
dv.SetMapIndex(dk, dkv)
}
case reflect.Interface:
if sv.IsNil() {
// Don't copy a nil interface!
dv.Set(reflect.Zero(dt))
return nil
}
tmpdv := reflect.New(sv.Elem().Type()).Elem()
if err := c.convert(sv.Elem(), tmpdv, scope); err != nil {
return err
}
dv.Set(reflect.ValueOf(tmpdv.Interface()))
return nil
default:
return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt)
}
return nil
}
var stringType = reflect.TypeOf("")
func toKVValue(v reflect.Value) kvValue {
switch v.Kind() {
case reflect.Struct:
return structAdaptor(v)
case reflect.Map:
if v.Type().Key().AssignableTo(stringType) {
return stringMapAdaptor(v)
}
}
return nil
}
// kvValue lets us write the same conversion logic to work with both maps
// and structs. Only maps with string keys make sense for this.
type kvValue interface {
// returns all keys, as a []string.
keys() []string
// Will just return "" for maps.
tagOf(key string) reflect.StructTag
// Will return the zero Value if the key doesn't exist.
value(key string) reflect.Value
// Maps require explicit setting-- will do nothing for structs.
// Returns false on failure.
confirmSet(key string, v reflect.Value) bool
}
type stringMapAdaptor reflect.Value
func (a stringMapAdaptor) len() int {
return reflect.Value(a).Len()
}
func (a stringMapAdaptor) keys() []string {
v := reflect.Value(a)
keys := make([]string, v.Len())
for i, v := range v.MapKeys() {
if v.IsNil() {
continue
}
switch t := v.Interface().(type) {
case string:
keys[i] = t
}
}
return keys
}
func (a stringMapAdaptor) tagOf(key string) reflect.StructTag {
return ""
}
func (a stringMapAdaptor) value(key string) reflect.Value {
return reflect.Value(a).MapIndex(reflect.ValueOf(key))
}
func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool {
return true
}
type structAdaptor reflect.Value
func (a structAdaptor) len() int {
v := reflect.Value(a)
return v.Type().NumField()
}
func (a structAdaptor) keys() []string {
v := reflect.Value(a)
t := v.Type()
keys := make([]string, t.NumField())
for i := range keys {
keys[i] = t.Field(i).Name
}
return keys
}
func (a structAdaptor) tagOf(key string) reflect.StructTag {
v := reflect.Value(a)
field, ok := v.Type().FieldByName(key)
if ok {
return field.Tag
}
return ""
}
func (a structAdaptor) value(key string) reflect.Value {
v := reflect.Value(a)
return v.FieldByName(key)
}
func (a structAdaptor) confirmSet(key string, v reflect.Value) bool {
return true
}
// convertKV can convert things that consist of key/value pairs, like structs
// and some maps.
func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error {
if skv == nil || dkv == nil {
// TODO: add keys to stack to support really understandable error messages.
return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv)
}
lister := dkv
if scope.flags.IsSet(SourceToDest) {
lister = skv
}
var mapping FieldMappingFunc
if scope.meta != nil && scope.meta.KeyNameMapping != nil {
mapping = scope.meta.KeyNameMapping
}
for _, key := range lister.keys() {
if found, err := c.checkField(key, skv, dkv, scope); found {
if err != nil {
return err
}
continue
}
stag := skv.tagOf(key)
dtag := dkv.tagOf(key)
skey := key
dkey := key
if mapping != nil {
skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag)
}
df := dkv.value(dkey)
sf := skv.value(skey)
if !df.IsValid() || !sf.IsValid() {
switch {
case scope.flags.IsSet(IgnoreMissingFields):
// No error.
case scope.flags.IsSet(SourceToDest):
return scope.errorf("%v not present in dest", dkey)
default:
return scope.errorf("%v not present in src", skey)
}
continue
}
scope.srcStack.top().key = skey
scope.srcStack.top().tag = stag
scope.destStack.top().key = dkey
scope.destStack.top().tag = dtag
if err := c.convert(sf, df, scope); err != nil {
return err
}
}
return nil
}
// checkField returns true if the field name matches any of the struct
// field copying rules. The error should be ignored if it returns false.
func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) {
replacementMade := false
if scope.flags.IsSet(DestFromSource) {
df := dkv.value(fieldName)
if !df.IsValid() {
return false, nil
}
destKey := typeNamePair{df.Type(), fieldName}
// Check each of the potential source (type, name) pairs to see if they're
// present in sv.
for _, potentialSourceKey := range c.structFieldSources[destKey] {
sf := skv.value(potentialSourceKey.fieldName)
if !sf.IsValid() {
continue
}
if sf.Type() == potentialSourceKey.fieldType {
// Both the source's name and type matched, so copy.
scope.srcStack.top().key = potentialSourceKey.fieldName
scope.destStack.top().key = fieldName
if err := c.convert(sf, df, scope); err != nil {
return true, err
}
dkv.confirmSet(fieldName, df)
replacementMade = true
}
}
return replacementMade, nil
}
sf := skv.value(fieldName)
if !sf.IsValid() {
return false, nil
}
srcKey := typeNamePair{sf.Type(), fieldName}
// Check each of the potential dest (type, name) pairs to see if they're
// present in dv.
for _, potentialDestKey := range c.structFieldDests[srcKey] {
df := dkv.value(potentialDestKey.fieldName)
if !df.IsValid() {
continue
}
if df.Type() == potentialDestKey.fieldType {
// Both the dest's name and type matched, so copy.
scope.srcStack.top().key = fieldName
scope.destStack.top().key = potentialDestKey.fieldName
if err := c.convert(sf, df, scope); err != nil {
return true, err
}
dkv.confirmSet(potentialDestKey.fieldName, df)
replacementMade = true
}
}
return replacementMade, nil
} }
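
A hedged sketch of the slimmed-down Converter API that survives the deletion above: FieldMatchingFlags, struct-tag scopes and the reflective fallback are gone, so every conversion must be registered explicitly and Convert now takes only (src, dest, meta). The Celsius and Fahrenheit types are made up for the example.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/conversion"
)

type Celsius struct{ Degrees float64 }
type Fahrenheit struct{ Degrees float64 }

func main() {
    c := conversion.NewConverter(conversion.DefaultNameFunc)

    // Register an untyped conversion for the (*Celsius, *Fahrenheit) pair.
    err := c.RegisterUntypedConversionFunc((*Celsius)(nil), (*Fahrenheit)(nil),
        func(a, b interface{}, s conversion.Scope) error {
            in, out := a.(*Celsius), b.(*Fahrenheit)
            out.Degrees = in.Degrees*9/5 + 32
            return nil
        })
    if err != nil {
        panic(err)
    }

    var f Fahrenheit
    if err := c.Convert(&Celsius{Degrees: 100}, &f, &conversion.Meta{}); err != nil { // no flags argument anymore
        panic(err)
    }
    fmt.Println(f.Degrees) // 212
}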

View File

@ -141,25 +141,6 @@ func Equals(labels1, labels2 Set) bool {
return true return true
} }
// AreLabelsInWhiteList verifies if the provided label list
// is in the provided whitelist and returns true, otherwise false.
func AreLabelsInWhiteList(labels, whitelist Set) bool {
if len(whitelist) == 0 {
return true
}
for k, v := range labels {
value, ok := whitelist[k]
if !ok {
return false
}
if value != v {
return false
}
}
return true
}
// ConvertSelectorToLabelsMap converts selector string to labels map // ConvertSelectorToLabelsMap converts selector string to labels map
// and validates keys and values // and validates keys and values
func ConvertSelectorToLabelsMap(selector string) (Set, error) { func ConvertSelectorToLabelsMap(selector string) (Set, error) {

View File

@ -263,11 +263,11 @@ func (r *Requirement) Values() sets.String {
} }
// Empty returns true if the internalSelector doesn't restrict selection space // Empty returns true if the internalSelector doesn't restrict selection space
func (lsel internalSelector) Empty() bool { func (s internalSelector) Empty() bool {
if lsel == nil { if s == nil {
return true return true
} }
return len(lsel) == 0 return len(s) == 0
} }
// String returns a human-readable string that represents this // String returns a human-readable string that represents this
@ -330,51 +330,51 @@ func safeSort(in []string) []string {
} }
// Add adds requirements to the selector. It copies the current selector returning a new one // Add adds requirements to the selector. It copies the current selector returning a new one
func (lsel internalSelector) Add(reqs ...Requirement) Selector { func (s internalSelector) Add(reqs ...Requirement) Selector {
var sel internalSelector var ret internalSelector
for ix := range lsel { for ix := range s {
sel = append(sel, lsel[ix]) ret = append(ret, s[ix])
} }
for _, r := range reqs { for _, r := range reqs {
sel = append(sel, r) ret = append(ret, r)
} }
sort.Sort(ByKey(sel)) sort.Sort(ByKey(ret))
return sel return ret
} }
// Matches for a internalSelector returns true if all // Matches for a internalSelector returns true if all
// its Requirements match the input Labels. If any // its Requirements match the input Labels. If any
// Requirement does not match, false is returned. // Requirement does not match, false is returned.
func (lsel internalSelector) Matches(l Labels) bool { func (s internalSelector) Matches(l Labels) bool {
for ix := range lsel { for ix := range s {
if matches := lsel[ix].Matches(l); !matches { if matches := s[ix].Matches(l); !matches {
return false return false
} }
} }
return true return true
} }
func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true }
// String returns a comma-separated string of all // String returns a comma-separated string of all
// the internalSelector Requirements' human-readable strings. // the internalSelector Requirements' human-readable strings.
func (lsel internalSelector) String() string { func (s internalSelector) String() string {
var reqs []string var reqs []string
for ix := range lsel { for ix := range s {
reqs = append(reqs, lsel[ix].String()) reqs = append(reqs, s[ix].String())
} }
return strings.Join(reqs, ",") return strings.Join(reqs, ",")
} }
// RequiresExactMatch introspect whether a given selector requires a single specific field // RequiresExactMatch introspect whether a given selector requires a single specific field
// to be set, and if so returns the value it requires. // to be set, and if so returns the value it requires.
func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { func (s internalSelector) RequiresExactMatch(label string) (value string, found bool) {
for ix := range lsel { for ix := range s {
if lsel[ix].key == label { if s[ix].key == label {
switch lsel[ix].operator { switch s[ix].operator {
case selection.Equals, selection.DoubleEquals, selection.In: case selection.Equals, selection.DoubleEquals, selection.In:
if len(lsel[ix].strValues) == 1 { if len(s[ix].strValues) == 1 {
return lsel[ix].strValues[0], true return s[ix].strValues[0], true
} }
} }
return "", false return "", false
@ -789,12 +789,12 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) {
// parseExactValue parses the only value for exact match style // parseExactValue parses the only value for exact match style
func (p *Parser) parseExactValue() (sets.String, error) { func (p *Parser) parseExactValue() (sets.String, error) {
s := sets.NewString() s := sets.NewString()
tok, lit := p.lookahead(Values) tok, _ := p.lookahead(Values)
if tok == EndOfStringToken || tok == CommaToken { if tok == EndOfStringToken || tok == CommaToken {
s.Insert("") s.Insert("")
return s, nil return s, nil
} }
tok, lit = p.consume(Values) tok, lit := p.consume(Values)
if tok == IdentifierToken { if tok == IdentifierToken {
s.Insert(lit) s.Insert(lit)
return s, nil return s, nil
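
Not part of the diff: a small usage sketch for the selector code touched above (k8s.io/apimachinery/pkg/labels); the receiver rename is cosmetic, so parsing and matching behave as before.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
)

func main() {
    sel, err := labels.Parse("app=demo,tier in (web,api)")
    if err != nil {
        panic(err)
    }

    pod := labels.Set{"app": "demo", "tier": "web"}
    fmt.Println(sel.Matches(pod))               // true
    fmt.Println(sel.RequiresExactMatch("app"))  // "demo" true: single-valued requirement
    fmt.Println(sel.RequiresExactMatch("tier")) // "" false: two possible values
}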

View File

@ -186,6 +186,9 @@ func fromUnstructured(sv, dv reflect.Value) error {
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
dv.Set(sv.Convert(dt)) dv.Set(sv.Convert(dt))
return nil return nil
case reflect.Float32, reflect.Float64:
dv.Set(sv.Convert(dt))
return nil
} }
case reflect.Float32, reflect.Float64: case reflect.Float32, reflect.Float64:
switch dt.Kind() { switch dt.Kind() {
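
Hedged illustration (types invented for the example) of what the two added lines enable: a value decoded from JSON as int64 can now populate a float64 field through runtime.DefaultUnstructuredConverter, which previously failed for this kind pair.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
)

type Sample struct {
    Ratio float64 `json:"ratio"`
}

func main() {
    u := map[string]interface{}{"ratio": int64(3)} // whole JSON numbers arrive as int64
    var s Sample
    if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u, &s); err != nil {
        panic(err)
    }
    fmt.Println(s.Ratio) // 3
}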

View File

@ -17,7 +17,7 @@ limitations under the License.
// This file was autogenerated by go-to-protobuf. Do not edit it manually! // This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2'; syntax = "proto2";
package k8s.io.apimachinery.pkg.runtime; package k8s.io.apimachinery.pkg.runtime;

View File

@ -57,7 +57,7 @@ type Encoder interface {
// Identifiers of two different encoders should be equal if and only if for every input // Identifiers of two different encoders should be equal if and only if for every input
// object it will be encoded to the same representation by both of them. // object it will be encoded to the same representation by both of them.
// //
// Identifier is inteted for use with CacheableObject#CacheEncode method. In order to // Identifier is intended for use with CacheableObject#CacheEncode method. In order to
// correctly handle CacheableObject, Encode() method should look similar to below, where // correctly handle CacheableObject, Encode() method should look similar to below, where
// doEncode() is the encoding logic of implemented encoder: // doEncode() is the encoding logic of implemented encoder:
// func (e *MyEncoder) Encode(obj Object, w io.Writer) error { // func (e *MyEncoder) Encode(obj Object, w io.Writer) error {

View File

@ -92,39 +92,6 @@ func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion
} }
} }
// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver
// where objects are converted to gv prior to sending and decoded to their internal representation prior
// to retrieval.
//
// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release.
func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator {
decode := schema.GroupVersions{
{
Group: gv.Group,
Version: APIVersionInternal,
},
// always include the legacy group as a decoding target to handle non-error `Status` return types
{
Group: "",
Version: APIVersionInternal,
},
}
return &clientNegotiator{
encode: gv,
decode: decode,
serializer: serializer,
}
}
// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used
// for testing or when the caller is taking responsibility for setting the GVK on encoded objects.
func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator {
return &clientNegotiator{
serializer: &simpleNegotiatedSerializer{info: info},
encode: gv,
}
}
type simpleNegotiatedSerializer struct { type simpleNegotiatedSerializer struct {
info SerializerInfo info SerializerInfo
} }
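
With the internal and simple negotiators above deleted, the remaining entry point is NewClientNegotiator. A sketch under those assumptions, using the vendored corev1 scheme and a WithoutConversion codec factory:

package main

import (
    "os"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
    scheme := runtime.NewScheme()
    if err := corev1.AddToScheme(scheme); err != nil {
        panic(err)
    }
    codecs := serializer.NewCodecFactory(scheme)
    negotiator := runtime.NewClientNegotiator(codecs.WithoutConversion(), corev1.SchemeGroupVersion)

    enc, err := negotiator.Encoder("application/json", nil)
    if err != nil {
        panic(err)
    }
    pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
    if err := enc.Encode(pod, os.Stdout); err != nil { // writes the Pod as v1 JSON
        panic(err)
    }
}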

View File

@ -17,7 +17,7 @@ limitations under the License.
// This file was autogenerated by go-to-protobuf. Do not edit it manually! // This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2'; syntax = "proto2";
package k8s.io.apimachinery.pkg.runtime.schema; package k8s.io.apimachinery.pkg.runtime.schema;

View File

@ -23,8 +23,8 @@ type ObjectKind interface {
// SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil
// should clear the current setting. // should clear the current setting.
SetGroupVersionKind(kind GroupVersionKind) SetGroupVersionKind(kind GroupVersionKind)
// GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does // GroupVersionKind returns the stored group, version, and kind of an object, or an empty struct
// not expose or provide these fields. // if the object does not expose or provide these fields.
GroupVersionKind() GroupVersionKind GroupVersionKind() GroupVersionKind
} }

View File

@ -18,7 +18,6 @@ package runtime
import ( import (
"fmt" "fmt"
"net/url"
"reflect" "reflect"
"strings" "strings"
@ -105,9 +104,6 @@ func NewScheme() *Scheme {
// Enable couple default conversions by default. // Enable couple default conversions by default.
utilruntime.Must(RegisterEmbeddedConversions(s)) utilruntime.Must(RegisterEmbeddedConversions(s))
utilruntime.Must(RegisterStringConversions(s)) utilruntime.Must(RegisterStringConversions(s))
utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
return s return s
} }
@ -309,11 +305,6 @@ func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) {
return nil, NewNotRegisteredErrForKind(s.schemeName, kind) return nil, NewNotRegisteredErrForKind(s.schemeName, kind)
} }
// Log sets a logger on the scheme. For test purposes only
func (s *Scheme) Log(l conversion.DebugLogger) {
s.converter.Debug = l
}
// AddIgnoredConversionType identifies a pair of types that should be skipped by // AddIgnoredConversionType identifies a pair of types that should be skipped by
// conversion (because the data inside them is explicitly dropped during // conversion (because the data inside them is explicitly dropped during
// conversion). // conversion).
@ -342,14 +333,6 @@ func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conver
return nil return nil
} }
// RegisterInputDefaults sets the provided field mapping function and field matching
// as the defaults for the provided input type. The fn may be nil, in which case no
// mapping will happen by default. Use this method to register a mechanism for handling
// a specific input type in conversion, such as a map[string]string to structs.
func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error {
return s.converter.RegisterInputDefaults(in, fn, defaultFlags)
}
// AddTypeDefaultingFunc registers a function that is passed a pointer to an // AddTypeDefaultingFunc registers a function that is passed a pointer to an
// object and can default fields on the object. These functions will be invoked // object and can default fields on the object. These functions will be invoked
// when Default() is called. The function will never be called unless the // when Default() is called. The function will never be called unless the
@ -433,12 +416,9 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error {
in = typed in = typed
} }
flags, meta := s.generateConvertMeta(in) meta := s.generateConvertMeta(in)
meta.Context = context meta.Context = context
if flags == 0 { return s.converter.Convert(in, out, meta)
flags = conversion.AllowDifferentFieldTypeNames
}
return s.converter.Convert(in, out, flags, meta)
} }
// ConvertFieldLabel alters the given field label and value for an kind field selector from // ConvertFieldLabel alters the given field label and value for an kind field selector from
@ -535,9 +515,9 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (
in = in.DeepCopyObject() in = in.DeepCopyObject()
} }
flags, meta := s.generateConvertMeta(in) meta := s.generateConvertMeta(in)
meta.Context = target meta.Context = target
if err := s.converter.Convert(in, out, flags, meta); err != nil { if err := s.converter.Convert(in, out, meta); err != nil {
return nil, err return nil, err
} }
@ -565,7 +545,7 @@ func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) {
} }
// generateConvertMeta constructs the meta value we pass to Convert. // generateConvertMeta constructs the meta value we pass to Convert.
func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { func (s *Scheme) generateConvertMeta(in interface{}) *conversion.Meta {
return s.converter.DefaultMeta(reflect.TypeOf(in)) return s.converter.DefaultMeta(reflect.TypeOf(in))
} }
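
A hedged sketch of calling Scheme.Convert after the simplification above (no FieldMatchingFlags, only a *conversion.Meta built internally); it assumes the scheme's generated self-conversions cover the unstructured-to-typed path used here.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
)

func main() {
    scheme := runtime.NewScheme()
    if err := corev1.AddToScheme(scheme); err != nil {
        panic(err)
    }

    u := &unstructured.Unstructured{Object: map[string]interface{}{
        "apiVersion": "v1",
        "kind":       "ConfigMap",
        "metadata":   map[string]interface{}{"name": "demo"},
        "data":       map[string]interface{}{"key": "value"},
    }}

    var cm corev1.ConfigMap
    if err := scheme.Convert(u, &cm, nil); err != nil { // context argument may be nil
        panic(err)
    }
    fmt.Println(cm.Name, cm.Data["key"]) // demo value
}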

View File

@@ -109,7 +109,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
 // versions and content types.
 type CodecFactory struct {
 	scheme *runtime.Scheme
-	serializers []serializerType
 	universal runtime.Decoder
 	accepts []runtime.SerializerInfo
@@ -217,7 +216,6 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
 	return CodecFactory{
 		scheme: scheme,
-		serializers: serializers,
 		universal: recognizer.NewDecoder(decoders...),
 		accepts: accepts,

View File

@@ -96,6 +96,7 @@ type SerializerOptions struct {
 	Strict bool
 }
 
+// Serializer handles encoding versioned objects into the proper JSON form
 type Serializer struct {
 	meta MetaFactory
 	options SerializerOptions
@@ -144,10 +145,10 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
 	}
 }
 
-// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be
 // case-sensitive when unmarshalling, and otherwise compatible with
 // the encoding/json standard library.
-func CaseSensitiveJsonIterator() jsoniter.API {
+func CaseSensitiveJSONIterator() jsoniter.API {
 	config := jsoniter.Config{
 		EscapeHTML: true,
 		SortMapKeys: true,
@@ -159,10 +160,10 @@ func CaseSensitiveJsonIterator() jsoniter.API {
 	return config
 }
 
-// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be
 // case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with
 // the encoding/json standard library.
-func StrictCaseSensitiveJsonIterator() jsoniter.API {
+func StrictCaseSensitiveJSONIterator() jsoniter.API {
 	config := jsoniter.Config{
 		EscapeHTML: true,
 		SortMapKeys: true,
@@ -179,8 +180,8 @@ func StrictCaseSensitiveJsonIterator() jsoniter.API {
 // from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
 // in some other library will mess with every usage of the jsoniter library in the whole program.
 // See https://github.com/json-iterator/go/issues/265
-var caseSensitiveJsonIterator = CaseSensitiveJsonIterator()
-var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator()
+var caseSensitiveJSONIterator = CaseSensitiveJSONIterator()
+var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator()
 
 // gvkWithDefaults returns group kind and version defaulting from provided default
 func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
@@ -236,7 +237,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
 	types, _, err := s.typer.ObjectKinds(into)
 	switch {
 	case runtime.IsNotRegisteredError(err), isUnstructured:
-		if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil {
+		if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil {
 			return nil, actual, err
 		}
 		return into, actual, nil
@@ -260,7 +261,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
 		return nil, actual, err
 	}
 
-	if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil {
+	if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil {
 		return nil, actual, err
 	}
@@ -285,7 +286,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
 	// due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError,
 	// the actual error is that the object contains unknown field.
 	strictObj := obj.DeepCopyObject()
-	if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil {
+	if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil {
 		return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
 	}
 	// Always return the same object as the non-strict serializer to avoid any deviations.
@@ -302,7 +303,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
 func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
 	if s.options.Yaml {
-		json, err := caseSensitiveJsonIterator.Marshal(obj)
+		json, err := caseSensitiveJSONIterator.Marshal(obj)
 		if err != nil {
 			return err
 		}
@@ -315,7 +316,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
 	}
 	if s.options.Pretty {
-		data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", "  ")
+		data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", "  ")
 		if err != nil {
 			return err
 		}
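The Json → JSON renames are cosmetic, but the behaviour these helpers freeze is worth spelling out: unlike encoding/json, the configured jsoniter API does not match struct fields case-insensitively. A standalone sketch of that difference using jsoniter directly (the serializer's own helpers and the `widget` type here are only for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type widget struct {
	Name string `json:"name"`
}

func main() {
	data := []byte(`{"NAME":"mismatched-case"}`)

	// encoding/json matches field names case-insensitively.
	var a widget
	_ = json.Unmarshal(data, &a)
	fmt.Printf("stdlib: %q\n", a.Name) // "mismatched-case"

	// A case-sensitive jsoniter API, similar in spirit to what
	// CaseSensitiveJSONIterator returns.
	caseSensitive := jsoniter.Config{
		EscapeHTML:             true,
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
		CaseSensitive:          true,
	}.Froze()

	var b widget
	_ = caseSensitive.Unmarshal(data, &b)
	fmt.Printf("jsoniter (case-sensitive): %q\n", b.Name) // ""
}
```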

View File

@@ -61,6 +61,7 @@ func (e errNotMarshalable) Status() metav1.Status {
 	}
 }
 
+// IsNotMarshalable checks the type of error, returns a boolean true if error is not nil and not marshalable false otherwise
 func IsNotMarshalable(err error) bool {
 	_, ok := err.(errNotMarshalable)
 	return err != nil && ok
@@ -77,6 +78,7 @@ func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Se
 	}
 }
 
+// Serializer handles encoding versioned objects into the proper wire form
 type Serializer struct {
 	prefix []byte
 	creater runtime.ObjectCreater
@@ -457,8 +459,10 @@ func (s *RawSerializer) Identifier() runtime.Identifier {
 	return rawSerializerIdentifier
 }
 
+// LengthDelimitedFramer is exported variable of type lengthDelimitedFramer
 var LengthDelimitedFramer = lengthDelimitedFramer{}
 
+// Provides length delimited frame reader and writer methods
 type lengthDelimitedFramer struct{}
 
 // NewFrameWriter implements stream framing for this serializer
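The new comments only document existing behaviour. For orientation, a small sketch of the exported LengthDelimitedFramer applied to raw bytes; the payloads are arbitrary and nothing here depends on protobuf encoding:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)

func main() {
	var buf bytes.Buffer

	// Each Write becomes one length-prefixed frame.
	w := protobuf.LengthDelimitedFramer.NewFrameWriter(&buf)
	_, _ = w.Write([]byte("first frame"))
	_, _ = w.Write([]byte("second frame"))

	// The reader returns one frame per Read (given a large enough
	// buffer) and reports io.EOF when the stream is exhausted.
	r := protobuf.LengthDelimitedFramer.NewFrameReader(ioutil.NopCloser(&buf))
	frame := make([]byte, 64)
	for {
		n, err := r.Read(frame)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("frame: %q\n", frame[:n])
	}
}
```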

View File

@@ -16,10 +16,6 @@ limitations under the License.
 
 package types
 
-import (
-	"fmt"
-)
-
 // NamespacedName comprises a resource name, with a mandatory namespace,
 // rendered as "<namespace>/<name>". Being a type captures intent and
 // helps make sure that UIDs, namespaced names and non-namespaced names
@@ -39,5 +35,5 @@ const (
 // String returns the general purpose string representation
 func (n NamespacedName) String() string {
-	return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name)
+	return n.Namespace + string(Separator) + n.Name
 }
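The change is behaviour-preserving; it only drops the fmt dependency. A trivial illustration of the rendered form:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	n := types.NamespacedName{Namespace: "kube-system", Name: "coredns"}
	// Still renders as "<namespace>/<name>", now via plain concatenation.
	fmt.Println(n.String()) // kube-system/coredns
}
```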

View File

@@ -348,7 +348,13 @@ func (f *fakeTimer) Stop() bool {
 // Reset conditionally updates the firing time of the timer. If the
 // timer has neither fired nor been stopped then this call resets the
 // timer to the fake clock's "now" + d and returns true, otherwise
-// this call returns false. This is like time.Timer::Reset.
+// it creates a new waiter, adds it to the clock, and returns true.
+//
+// It is not possible to return false, because a fake timer can be reset
+// from any state (waiting to fire, already fired, and stopped).
+//
+// See the GoDoc for time.Timer::Reset for more context on why
+// the return value of Reset() is not useful.
 func (f *fakeTimer) Reset(d time.Duration) bool {
 	f.fakeClock.lock.Lock()
 	defer f.fakeClock.lock.Unlock()
@@ -360,7 +366,15 @@ func (f *fakeTimer) Reset(d time.Duration) bool {
 			return true
 		}
 	}
-	return false
+	// No existing waiter, timer has already fired or been reset.
+	// We should still enable Reset() to succeed by creating a
+	// new waiter and adding it to the clock's waiters.
+	newWaiter := fakeClockWaiter{
+		targetTime: f.fakeClock.time.Add(d),
+		destChan:   f.waiter.destChan,
+	}
+	f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter)
+	return true
 }
 
 // Ticker defines the Ticker interface
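A short sketch of the semantics the new comment promises: Reset on an already-stopped fake timer still returns true and re-arms it against the fake clock. The sequence below is illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Now())
	timer := fc.NewTimer(time.Second)

	// Stop the timer, then reset it: with the new semantics Reset still
	// returns true and re-arms the timer relative to the fake "now".
	timer.Stop()
	fmt.Println(timer.Reset(2 * time.Second)) // true

	fc.Step(2 * time.Second) // advance far enough to fire the re-armed timer
	select {
	case <-timer.C():
		fmt.Println("timer fired")
	default:
		fmt.Println("timer did not fire")
	}
}
```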

View File

@@ -163,7 +163,7 @@ func matchesError(err error, fns ...Matcher) bool {
 // filterErrors returns any errors (or nested errors, if the list contains
 // nested Errors) for which all fns return false. If no errors
-// remain a nil list is returned. The resulting silec will have all
+// remain a nil list is returned. The resulting slice will have all
 // nested slices flattened as a side effect.
 func filterErrors(list []error, fns ...Matcher) []error {
 	result := []error{}
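filterErrors itself is unexported; callers reach this matching-and-flattening logic through the exported FilterOut. A hedged sketch with a simple sentinel matcher (the errNotFound value is made up for the example):

```go
package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

var errNotFound = errors.New("not found")

func main() {
	agg := utilerrors.NewAggregate([]error{
		errNotFound,
		errors.New("permission denied"),
	})

	// Drop every "not found" error; whatever remains (flattened) comes
	// back as a new aggregate, or nil if nothing is left.
	filtered := utilerrors.FilterOut(agg, func(err error) bool {
		return err == errNotFound
	})
	fmt.Println(filtered) // permission denied
}
```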

View File

@@ -132,12 +132,14 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
 	// Return whatever remaining data exists from an in progress frame
 	if n := len(r.remaining); n > 0 {
 		if n <= len(data) {
+			//lint:ignore SA4006,SA4010 underlying array of data is modified here.
 			data = append(data[0:0], r.remaining...)
 			r.remaining = nil
 			return n, nil
 		}
 
 		n = len(data)
+		//lint:ignore SA4006,SA4010 underlying array of data is modified here.
 		data = append(data[0:0], r.remaining[:n]...)
 		r.remaining = r.remaining[n:]
 		return n, io.ErrShortBuffer
@@ -155,6 +157,7 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
 	// and set m to it, which means we need to copy the partial result back into data and preserve
 	// the remaining result for subsequent reads.
 	if len(m) > n {
+		//lint:ignore SA4006,SA4010 underlying array of data is modified here.
 		data = append(data[0:0], m[:n]...)
 		r.remaining = m[n:]
 		return n, io.ErrShortBuffer
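The added lines are only lint annotations, but the short-buffer contract they sit next to is easy to misuse. A sketch of draining one JSON frame through a deliberately small buffer, treating io.ErrShortBuffer as "call Read again" (the sample document is arbitrary):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"k8s.io/apimachinery/pkg/util/framer"
)

func main() {
	r := framer.NewJSONFramedReader(ioutil.NopCloser(
		strings.NewReader(`{"kind":"Status","status":"Success"}`)))

	// Read with a small buffer: the reader fills it, returns
	// io.ErrShortBuffer, and keeps the remainder for the next Read.
	buf := make([]byte, 10)
	var doc []byte
	for {
		n, err := r.Read(buf)
		doc = append(doc, buf[:n]...)
		if err == io.ErrShortBuffer {
			continue
		}
		if err != nil && err != io.EOF {
			panic(err)
		}
		break
	}
	fmt.Println(string(doc))
}
```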

View File

@@ -34,38 +34,62 @@ type connection struct {
 	streams []httpstream.Stream
 	streamLock sync.Mutex
 	newStreamHandler httpstream.NewStreamHandler
+	ping func() (time.Duration, error)
 }
 
 // NewClientConnection creates a new SPDY client connection.
 func NewClientConnection(conn net.Conn) (httpstream.Connection, error) {
+	return NewClientConnectionWithPings(conn, 0)
+}
+
+// NewClientConnectionWithPings creates a new SPDY client connection.
+//
+// If pingPeriod is non-zero, a background goroutine will send periodic Ping
+// frames to the server. Use this to keep idle connections through certain load
+// balancers alive longer.
+func NewClientConnectionWithPings(conn net.Conn, pingPeriod time.Duration) (httpstream.Connection, error) {
 	spdyConn, err := spdystream.NewConnection(conn, false)
 	if err != nil {
 		defer conn.Close()
 		return nil, err
 	}
 
-	return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil
+	return newConnection(spdyConn, httpstream.NoOpNewStreamHandler, pingPeriod, spdyConn.Ping), nil
 }
 
 // NewServerConnection creates a new SPDY server connection. newStreamHandler
 // will be invoked when the server receives a newly created stream from the
 // client.
 func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) {
+	return NewServerConnectionWithPings(conn, newStreamHandler, 0)
+}
+
+// NewServerConnectionWithPings creates a new SPDY server connection.
+// newStreamHandler will be invoked when the server receives a newly created
+// stream from the client.
+//
+// If pingPeriod is non-zero, a background goroutine will send periodic Ping
+// frames to the server. Use this to keep idle connections through certain load
+// balancers alive longer.
+func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration) (httpstream.Connection, error) {
 	spdyConn, err := spdystream.NewConnection(conn, true)
 	if err != nil {
 		defer conn.Close()
 		return nil, err
 	}
 
-	return newConnection(spdyConn, newStreamHandler), nil
+	return newConnection(spdyConn, newStreamHandler, pingPeriod, spdyConn.Ping), nil
 }
 
 // newConnection returns a new connection wrapping conn. newStreamHandler
 // will be invoked when the server receives a newly created stream from the
 // client.
-func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
-	c := &connection{conn: conn, newStreamHandler: newStreamHandler}
+func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection {
+	c := &connection{conn: conn, newStreamHandler: newStreamHandler, ping: pingFn}
 	go conn.Serve(c.newSpdyStream)
+	if pingPeriod > 0 && pingFn != nil {
+		go c.sendPings(pingPeriod)
+	}
 	return c
 }
@@ -143,3 +167,21 @@ func (c *connection) newSpdyStream(stream *spdystream.Stream) {
 func (c *connection) SetIdleTimeout(timeout time.Duration) {
 	c.conn.SetIdleTimeout(timeout)
 }
+
+func (c *connection) sendPings(period time.Duration) {
+	t := time.NewTicker(period)
+	defer t.Stop()
+	for {
+		select {
+		case <-c.conn.CloseChan():
+			return
+		case <-t.C:
+		}
+		if _, err := c.ping(); err != nil {
+			klog.V(3).Infof("SPDY Ping failed: %v", err)
+			// Continue, in case this is a transient failure.
+			// c.conn.CloseChan above will tell us when the connection is
+			// actually closed.
+		}
+	}
+}
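A sketch of how a caller might opt into the new keep-alive pings; the address and the 30-second period are illustrative, the remote end is assumed to speak SPDY, and error handling is reduced to panics:

```go
package main

import (
	"net"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	// Dial some SPDY-speaking endpoint (address is illustrative).
	conn, err := net.Dial("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}

	// A non-zero ping period starts a background goroutine that sends
	// SPDY Ping frames, keeping idle connections alive through load
	// balancers that drop quiet TCP sessions.
	spdyConn, err := spdy.NewClientConnectionWithPings(conn, 30*time.Second)
	if err != nil {
		panic(err)
	}
	defer spdyConn.Close()

	stream, err := spdyConn.CreateStream(http.Header{})
	if err != nil {
		panic(err)
	}
	defer stream.Close()
}
```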

Some files were not shown because too many files have changed in this diff.