generated: ./hack/update-vendor.sh
This commit is contained in:
30
LICENSES/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
generated
vendored
30
LICENSES/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
generated
vendored
@@ -1,30 +0,0 @@
|
||||
= vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4 licensed under: =
|
||||
|
||||
Copyright 2021 The ANTLR Project
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
= vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE 7efb09a9ec943fd32bc2645ceaf109d0
|
32
LICENSES/vendor/github.com/antlr4-go/antlr/v4/LICENSE
generated
vendored
Normal file
32
LICENSES/vendor/github.com/antlr4-go/antlr/v4/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
= vendor/github.com/antlr4-go/antlr/v4 licensed under: =
|
||||
|
||||
Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither name of copyright holders nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
|
||||
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
= vendor/github.com/antlr4-go/antlr/v4/LICENSE f399e127495f9783cfbe2b3b2802555f
|
6
go.mod
6
go.mod
@@ -131,7 +131,7 @@ require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
@@ -217,12 +217,12 @@ require (
|
||||
go.opentelemetry.io/otel/metric v1.19.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/mod v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
gopkg.in/gcfg.v1 v1.2.3 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
|
14
go.sum
14
go.sum
@@ -189,8 +189,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
@@ -428,8 +428,6 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cadvisor v0.49.0 h1:1PYeiORXmcFYi609M4Qvq5IzcvcVaWgYxDt78uH8jYA=
|
||||
github.com/google/cadvisor v0.49.0/go.mod h1:s6Fqwb2KiWG6leCegVhw4KW40tf9f7m+SF1aXiE8Wsk=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
@@ -837,8 +835,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@@ -1189,8 +1187,8 @@ google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ6
|
||||
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
|
@@ -14,7 +14,6 @@ require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
|
@@ -20,7 +20,7 @@ require (
|
||||
go.etcd.io/etcd/client/v3 v3.5.10
|
||||
go.opentelemetry.io/otel v1.19.0
|
||||
go.opentelemetry.io/otel/trace v1.19.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5
|
||||
google.golang.org/grpc v1.58.3
|
||||
google.golang.org/protobuf v1.33.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
@@ -40,7 +40,7 @@ require (
|
||||
|
||||
require (
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
@@ -104,7 +104,7 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/mod v0.15.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
|
16
staging/src/k8s.io/apiextensions-apiserver/go.sum
generated
16
staging/src/k8s.io/apiextensions-apiserver/go.sum
generated
@@ -127,8 +127,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
@@ -219,8 +219,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -398,8 +398,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
@@ -484,8 +484,8 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
|
@@ -37,7 +37,6 @@ require (
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
|
@@ -38,7 +38,7 @@ require (
|
||||
golang.org/x/sync v0.6.0
|
||||
golang.org/x/sys v0.18.0
|
||||
golang.org/x/time v0.3.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5
|
||||
google.golang.org/grpc v1.58.3
|
||||
google.golang.org/protobuf v1.33.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
@@ -59,7 +59,7 @@ require (
|
||||
|
||||
require (
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
@@ -111,7 +111,7 @@ require (
|
||||
go.opentelemetry.io/otel/metric v1.19.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
golang.org/x/term v0.18.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
|
16
staging/src/k8s.io/apiserver/go.sum
generated
16
staging/src/k8s.io/apiserver/go.sum
generated
@@ -127,8 +127,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
@@ -220,8 +220,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -400,8 +400,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
@@ -484,8 +484,8 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
|
@@ -40,7 +40,6 @@ require (
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
|
@@ -42,7 +42,6 @@ require (
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
|
@@ -23,7 +23,7 @@ require (
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
@@ -82,7 +82,7 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
@@ -92,7 +92,7 @@ require (
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
|
16
staging/src/k8s.io/cloud-provider/go.sum
generated
16
staging/src/k8s.io/cloud-provider/go.sum
generated
@@ -125,8 +125,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
@@ -199,8 +199,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -356,8 +356,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
@@ -405,8 +405,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
|
@@ -16,7 +16,6 @@ require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
|
@@ -25,7 +25,6 @@ require (
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
|
@@ -46,7 +46,6 @@ require (
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
@@ -70,7 +69,7 @@ require (
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
|
4
staging/src/k8s.io/component-base/go.sum
generated
4
staging/src/k8s.io/component-base/go.sum
generated
@@ -220,8 +220,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
|
@@ -24,7 +24,6 @@ require (
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
|
@@ -19,7 +19,7 @@ require (
|
||||
|
||||
require (
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
@@ -78,7 +78,7 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.18.0 // indirect
|
||||
@@ -87,7 +87,7 @@ require (
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
|
16
staging/src/k8s.io/controller-manager/go.sum
generated
16
staging/src/k8s.io/controller-manager/go.sum
generated
@@ -124,8 +124,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
@@ -196,8 +196,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -352,8 +352,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
@@ -400,8 +400,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
|
@@ -13,7 +13,6 @@ require (
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
|
@@ -15,7 +15,6 @@ require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
|
@@ -21,7 +21,7 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
@@ -43,7 +43,7 @@ require (
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.2.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
@@ -52,7 +52,7 @@ require (
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
|
@@ -1,8 +1,8 @@
|
||||
cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
@@ -51,8 +51,8 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -159,8 +159,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
@@ -206,8 +206,8 @@ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNq
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
|
@@ -29,7 +29,6 @@ require (
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
|
2
staging/src/k8s.io/endpointslice/go.sum
generated
2
staging/src/k8s.io/endpointslice/go.sum
generated
@@ -183,7 +183,7 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
|
@@ -11,7 +11,6 @@ require (
|
||||
|
||||
require (
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/sys v0.18.0 // indirect
|
||||
|
@@ -27,7 +27,7 @@ require (
|
||||
|
||||
require (
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
@@ -84,7 +84,7 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/mod v0.15.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
@@ -95,7 +95,7 @@ require (
|
||||
golang.org/x/tools v0.18.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
|
16
staging/src/k8s.io/kube-aggregator/go.sum
generated
16
staging/src/k8s.io/kube-aggregator/go.sum
generated
@@ -124,8 +124,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
@@ -197,8 +197,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -357,8 +357,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
|
||||
@@ -407,8 +407,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
|
@@ -13,7 +13,6 @@ require (
|
||||
require (
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
|
@@ -1,6 +1,6 @@
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
@@ -30,7 +30,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
@@ -107,7 +107,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
@@ -143,7 +143,7 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
|
@@ -15,7 +15,6 @@ require (
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
|
2
staging/src/k8s.io/kube-proxy/go.sum
generated
2
staging/src/k8s.io/kube-proxy/go.sum
generated
@@ -143,7 +143,7 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
|
@@ -15,7 +15,6 @@ require (
|
||||
require (
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
|
2
staging/src/k8s.io/kube-scheduler/go.sum
generated
2
staging/src/k8s.io/kube-scheduler/go.sum
generated
@@ -121,7 +121,7 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
|
@@ -59,7 +59,6 @@ require (
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
|
2
staging/src/k8s.io/kubectl/go.sum
generated
2
staging/src/k8s.io/kubectl/go.sum
generated
@@ -285,7 +285,7 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
|
@@ -26,7 +26,6 @@ require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
|
8
staging/src/k8s.io/kubelet/go.sum
generated
8
staging/src/k8s.io/kubelet/go.sum
generated
@@ -4,7 +4,7 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
@@ -57,7 +57,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -170,7 +170,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
@@ -214,7 +214,7 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
|
@@ -36,7 +36,6 @@ require (
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
|
10
staging/src/k8s.io/legacy-cloud-providers/go.sum
generated
10
staging/src/k8s.io/legacy-cloud-providers/go.sum
generated
@@ -58,7 +58,7 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
@@ -164,7 +164,7 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -348,7 +348,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@@ -668,8 +668,8 @@ google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ6
|
||||
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
|
@@ -22,7 +22,6 @@ require (
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
|
@@ -18,7 +18,6 @@ require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.0 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
|
@@ -22,7 +22,7 @@ require (
|
||||
|
||||
require (
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
@@ -79,7 +79,7 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
@@ -89,7 +89,7 @@ require (
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
|
16
staging/src/k8s.io/pod-security-admission/go.sum
generated
16
staging/src/k8s.io/pod-security-admission/go.sum
generated
@@ -124,8 +124,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
@@ -196,8 +196,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -352,8 +352,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
@@ -400,8 +400,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
|
@@ -19,7 +19,7 @@ require (
|
||||
|
||||
require (
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
@@ -76,7 +76,7 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/mod v0.15.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
@@ -88,7 +88,7 @@ require (
|
||||
golang.org/x/tools v0.18.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/grpc v1.58.3 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
|
16
staging/src/k8s.io/sample-apiserver/go.sum
generated
16
staging/src/k8s.io/sample-apiserver/go.sum
generated
@@ -124,8 +124,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
@@ -196,8 +196,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -353,8 +353,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
|
||||
@@ -403,8 +403,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g=
|
||||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
|
||||
|
@@ -24,7 +24,6 @@ require (
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
|
@@ -24,7 +24,6 @@ require (
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/cel-go v0.20.1
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
|
26
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
generated
vendored
26
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
generated
vendored
@@ -1,26 +0,0 @@
|
||||
Copyright 2021 The ANTLR Project
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
68
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
generated
vendored
68
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
generated
vendored
@@ -1,68 +0,0 @@
|
||||
/*
|
||||
Package antlr implements the Go version of the ANTLR 4 runtime.
|
||||
|
||||
# The ANTLR Tool
|
||||
|
||||
ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
|
||||
or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
|
||||
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
|
||||
(or visitor) that makes it easy to respond to the recognition of phrases of interest.
|
||||
|
||||
# Code Generation
|
||||
|
||||
ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
|
||||
runtime library, written specifically to support the generated code in the target language. This library is the
|
||||
runtime for the Go target.
|
||||
|
||||
To generate code for the go target, it is generally recommended to place the source grammar files in a package of
|
||||
their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
|
||||
it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
|
||||
that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
|
||||
way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
|
||||
your IDE, or configuration in your CI system.
|
||||
|
||||
Here is a general template for an ANTLR based recognizer in Go:
|
||||
|
||||
.
|
||||
├── myproject
|
||||
├── parser
|
||||
│ ├── mygrammar.g4
|
||||
│ ├── antlr-4.12.0-complete.jar
|
||||
│ ├── error_listeners.go
|
||||
│ ├── generate.go
|
||||
│ ├── generate.sh
|
||||
├── go.mod
|
||||
├── go.sum
|
||||
├── main.go
|
||||
└── main_test.go
|
||||
|
||||
Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
|
||||
The generate.go file then looks like this:
|
||||
|
||||
package parser
|
||||
|
||||
//go:generate ./generate.sh
|
||||
|
||||
And the generate.sh file will look similar to this:
|
||||
|
||||
#!/bin/sh
|
||||
|
||||
alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
|
||||
antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
|
||||
|
||||
depending on whether you want visitors or listeners or any other ANTLR options.
|
||||
|
||||
From the command line at the root of your package “myproject” you can then simply issue the command:
|
||||
|
||||
go generate ./...
|
||||
|
||||
# Copyright Notice
|
||||
|
||||
Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
|
||||
Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
|
||||
|
||||
[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
|
||||
[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
|
||||
*/
|
||||
package antlr
|
303
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
generated
vendored
303
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
generated
vendored
@@ -1,303 +0,0 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
|
||||
// context). The syntactic context is a graph-structured stack node whose
|
||||
// path(s) to the root is the rule invocation(s) chain used to arrive at the
|
||||
// state. The semantic context is the tree of semantic predicates encountered
|
||||
// before reaching an ATN state.
|
||||
type ATNConfig interface {
|
||||
Equals(o Collectable[ATNConfig]) bool
|
||||
Hash() int
|
||||
|
||||
GetState() ATNState
|
||||
GetAlt() int
|
||||
GetSemanticContext() SemanticContext
|
||||
|
||||
GetContext() PredictionContext
|
||||
SetContext(PredictionContext)
|
||||
|
||||
GetReachesIntoOuterContext() int
|
||||
SetReachesIntoOuterContext(int)
|
||||
|
||||
String() string
|
||||
|
||||
getPrecedenceFilterSuppressed() bool
|
||||
setPrecedenceFilterSuppressed(bool)
|
||||
}
|
||||
|
||||
type BaseATNConfig struct {
|
||||
precedenceFilterSuppressed bool
|
||||
state ATNState
|
||||
alt int
|
||||
context PredictionContext
|
||||
semanticContext SemanticContext
|
||||
reachesIntoOuterContext int
|
||||
}
|
||||
|
||||
func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
|
||||
return &BaseATNConfig{
|
||||
state: old.state,
|
||||
alt: old.alt,
|
||||
context: old.context,
|
||||
semanticContext: old.semanticContext,
|
||||
reachesIntoOuterContext: old.reachesIntoOuterContext,
|
||||
}
|
||||
}
|
||||
|
||||
func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
|
||||
return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
|
||||
}
|
||||
|
||||
func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
|
||||
if semanticContext == nil {
|
||||
panic("semanticContext cannot be nil") // TODO: Necessary?
|
||||
}
|
||||
|
||||
return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
|
||||
}
|
||||
|
||||
func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
|
||||
return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
|
||||
}
|
||||
|
||||
func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
|
||||
return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
|
||||
}
|
||||
|
||||
func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
|
||||
return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
|
||||
}
|
||||
|
||||
func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
|
||||
return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
|
||||
}
|
||||
|
||||
func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
|
||||
if semanticContext == nil {
|
||||
panic("semanticContext cannot be nil")
|
||||
}
|
||||
|
||||
return &BaseATNConfig{
|
||||
state: state,
|
||||
alt: c.GetAlt(),
|
||||
context: context,
|
||||
semanticContext: semanticContext,
|
||||
reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
|
||||
precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
|
||||
return b.precedenceFilterSuppressed
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
|
||||
b.precedenceFilterSuppressed = v
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) GetState() ATNState {
|
||||
return b.state
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) GetAlt() int {
|
||||
return b.alt
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) SetContext(v PredictionContext) {
|
||||
b.context = v
|
||||
}
|
||||
func (b *BaseATNConfig) GetContext() PredictionContext {
|
||||
return b.context
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
|
||||
return b.semanticContext
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
|
||||
return b.reachesIntoOuterContext
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
|
||||
b.reachesIntoOuterContext = v
|
||||
}
|
||||
|
||||
// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
|
||||
// for a collection.
|
||||
//
|
||||
// An ATN configuration is equal to another if both have the same state, they
|
||||
// predict the same alternative, and syntactic/semantic contexts are the same.
|
||||
func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
|
||||
if b == o {
|
||||
return true
|
||||
} else if o == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var other, ok = o.(*BaseATNConfig)
|
||||
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
var equal bool
|
||||
|
||||
if b.context == nil {
|
||||
equal = other.context == nil
|
||||
} else {
|
||||
equal = b.context.Equals(other.context)
|
||||
}
|
||||
|
||||
var (
|
||||
nums = b.state.GetStateNumber() == other.state.GetStateNumber()
|
||||
alts = b.alt == other.alt
|
||||
cons = b.semanticContext.Equals(other.semanticContext)
|
||||
sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
|
||||
)
|
||||
|
||||
return nums && alts && cons && sups && equal
|
||||
}
|
||||
|
||||
// Hash is the default hash function for BaseATNConfig, when no specialist hash function
|
||||
// is required for a collection
|
||||
func (b *BaseATNConfig) Hash() int {
|
||||
var c int
|
||||
if b.context != nil {
|
||||
c = b.context.Hash()
|
||||
}
|
||||
|
||||
h := murmurInit(7)
|
||||
h = murmurUpdate(h, b.state.GetStateNumber())
|
||||
h = murmurUpdate(h, b.alt)
|
||||
h = murmurUpdate(h, c)
|
||||
h = murmurUpdate(h, b.semanticContext.Hash())
|
||||
return murmurFinish(h, 4)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) String() string {
|
||||
var s1, s2, s3 string
|
||||
|
||||
if b.context != nil {
|
||||
s1 = ",[" + fmt.Sprint(b.context) + "]"
|
||||
}
|
||||
|
||||
if b.semanticContext != SemanticContextNone {
|
||||
s2 = "," + fmt.Sprint(b.semanticContext)
|
||||
}
|
||||
|
||||
if b.reachesIntoOuterContext > 0 {
|
||||
s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
|
||||
}
|
||||
|
||||
type LexerATNConfig struct {
|
||||
*BaseATNConfig
|
||||
lexerActionExecutor *LexerActionExecutor
|
||||
passedThroughNonGreedyDecision bool
|
||||
}
|
||||
|
||||
func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
|
||||
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
|
||||
}
|
||||
|
||||
func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
|
||||
return &LexerATNConfig{
|
||||
BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
|
||||
lexerActionExecutor: lexerActionExecutor,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
|
||||
return &LexerATNConfig{
|
||||
BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
|
||||
lexerActionExecutor: c.lexerActionExecutor,
|
||||
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
|
||||
}
|
||||
}
|
||||
|
||||
func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
|
||||
return &LexerATNConfig{
|
||||
BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
|
||||
lexerActionExecutor: lexerActionExecutor,
|
||||
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
|
||||
}
|
||||
}
|
||||
|
||||
func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
|
||||
return &LexerATNConfig{
|
||||
BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
|
||||
lexerActionExecutor: c.lexerActionExecutor,
|
||||
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
|
||||
}
|
||||
}
|
||||
|
||||
func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
|
||||
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
|
||||
}
|
||||
|
||||
// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
|
||||
// the default comparator [ObjEqComparator].
|
||||
func (l *LexerATNConfig) Hash() int {
|
||||
var f int
|
||||
if l.passedThroughNonGreedyDecision {
|
||||
f = 1
|
||||
} else {
|
||||
f = 0
|
||||
}
|
||||
h := murmurInit(7)
|
||||
h = murmurUpdate(h, l.state.GetStateNumber())
|
||||
h = murmurUpdate(h, l.alt)
|
||||
h = murmurUpdate(h, l.context.Hash())
|
||||
h = murmurUpdate(h, l.semanticContext.Hash())
|
||||
h = murmurUpdate(h, f)
|
||||
h = murmurUpdate(h, l.lexerActionExecutor.Hash())
|
||||
h = murmurFinish(h, 6)
|
||||
return h
|
||||
}
|
||||
|
||||
// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
|
||||
// the default comparator [ObjEqComparator].
|
||||
func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
|
||||
if l == other {
|
||||
return true
|
||||
}
|
||||
var othert, ok = other.(*LexerATNConfig)
|
||||
|
||||
if l == other {
|
||||
return true
|
||||
} else if !ok {
|
||||
return false
|
||||
} else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
|
||||
return false
|
||||
}
|
||||
|
||||
var b bool
|
||||
|
||||
if l.lexerActionExecutor != nil {
|
||||
b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
|
||||
} else {
|
||||
b = othert.lexerActionExecutor != nil
|
||||
}
|
||||
|
||||
if b {
|
||||
return false
|
||||
}
|
||||
|
||||
return l.BaseATNConfig.Equals(othert.BaseATNConfig)
|
||||
}
|
||||
|
||||
func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
|
||||
var ds, ok = target.(DecisionState)
|
||||
|
||||
return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
|
||||
}
|
441
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
generated
vendored
441
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
generated
vendored
@@ -1,441 +0,0 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type ATNConfigSet interface {
|
||||
Hash() int
|
||||
Equals(o Collectable[ATNConfig]) bool
|
||||
Add(ATNConfig, *DoubleDict) bool
|
||||
AddAll([]ATNConfig) bool
|
||||
|
||||
GetStates() *JStore[ATNState, Comparator[ATNState]]
|
||||
GetPredicates() []SemanticContext
|
||||
GetItems() []ATNConfig
|
||||
|
||||
OptimizeConfigs(interpreter *BaseATNSimulator)
|
||||
|
||||
Length() int
|
||||
IsEmpty() bool
|
||||
Contains(ATNConfig) bool
|
||||
ContainsFast(ATNConfig) bool
|
||||
Clear()
|
||||
String() string
|
||||
|
||||
HasSemanticContext() bool
|
||||
SetHasSemanticContext(v bool)
|
||||
|
||||
ReadOnly() bool
|
||||
SetReadOnly(bool)
|
||||
|
||||
GetConflictingAlts() *BitSet
|
||||
SetConflictingAlts(*BitSet)
|
||||
|
||||
Alts() *BitSet
|
||||
|
||||
FullContext() bool
|
||||
|
||||
GetUniqueAlt() int
|
||||
SetUniqueAlt(int)
|
||||
|
||||
GetDipsIntoOuterContext() bool
|
||||
SetDipsIntoOuterContext(bool)
|
||||
}
|
||||
|
||||
// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
|
||||
// about its elements and can combine similar configurations using a
|
||||
// graph-structured stack.
|
||||
type BaseATNConfigSet struct {
|
||||
cachedHash int
|
||||
|
||||
// configLookup is used to determine whether two BaseATNConfigSets are equal. We
|
||||
// need all configurations with the same (s, i, _, semctx) to be equal. A key
|
||||
// effectively doubles the number of objects associated with ATNConfigs. All
|
||||
// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
|
||||
// read-only because a set becomes a DFA state.
|
||||
configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
|
||||
|
||||
// configs is the added elements.
|
||||
configs []ATNConfig
|
||||
|
||||
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
|
||||
// info together because it saves recomputation. Can we track conflicts as they
|
||||
// are added to save scanning configs later?
|
||||
conflictingAlts *BitSet
|
||||
|
||||
// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
|
||||
// we hit a pred while computing a closure operation. Do not make a DFA state
|
||||
// from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
|
||||
dipsIntoOuterContext bool
|
||||
|
||||
// fullCtx is whether it is part of a full context LL prediction. Used to
|
||||
// determine how to merge $. It is a wildcard with SLL, but not for an LL
|
||||
// context merge.
|
||||
fullCtx bool
|
||||
|
||||
// Used in parser and lexer. In lexer, it indicates we hit a pred
|
||||
// while computing a closure operation. Don't make a DFA state from a.
|
||||
hasSemanticContext bool
|
||||
|
||||
// readOnly is whether it is read-only. Do not
|
||||
// allow any code to manipulate the set if true because DFA states will point at
|
||||
// sets and those must not change. It not, protect other fields; conflictingAlts
|
||||
// in particular, which is assigned after readOnly.
|
||||
readOnly bool
|
||||
|
||||
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
|
||||
// info together because it saves recomputation. Can we track conflicts as they
|
||||
// are added to save scanning configs later?
|
||||
uniqueAlt int
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Alts() *BitSet {
|
||||
alts := NewBitSet()
|
||||
for _, it := range b.configs {
|
||||
alts.add(it.GetAlt())
|
||||
}
|
||||
return alts
|
||||
}
|
||||
|
||||
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
|
||||
return &BaseATNConfigSet{
|
||||
cachedHash: -1,
|
||||
configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
|
||||
fullCtx: fullCtx,
|
||||
}
|
||||
}
|
||||
|
||||
// Add merges contexts with existing configs for (s, i, pi, _), where s is the
|
||||
// ATNConfig.state, i is the ATNConfig.alt, and pi is the
|
||||
// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
|
||||
// dipsIntoOuterContext and hasSemanticContext when necessary.
|
||||
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
|
||||
if b.readOnly {
|
||||
panic("set is read-only")
|
||||
}
|
||||
|
||||
if config.GetSemanticContext() != SemanticContextNone {
|
||||
b.hasSemanticContext = true
|
||||
}
|
||||
|
||||
if config.GetReachesIntoOuterContext() > 0 {
|
||||
b.dipsIntoOuterContext = true
|
||||
}
|
||||
|
||||
existing, present := b.configLookup.Put(config)
|
||||
|
||||
// The config was not already in the set
|
||||
//
|
||||
if !present {
|
||||
b.cachedHash = -1
|
||||
b.configs = append(b.configs, config) // Track order here
|
||||
return true
|
||||
}
|
||||
|
||||
// Merge a previous (s, i, pi, _) with it and save the result
|
||||
rootIsWildcard := !b.fullCtx
|
||||
merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
|
||||
|
||||
// No need to check for existing.context because config.context is in the cache,
|
||||
// since the only way to create new graphs is the "call rule" and here. We cache
|
||||
// at both places.
|
||||
existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
|
||||
|
||||
// Preserve the precedence filter suppression during the merge
|
||||
if config.getPrecedenceFilterSuppressed() {
|
||||
existing.setPrecedenceFilterSuppressed(true)
|
||||
}
|
||||
|
||||
// Replace the context because there is no need to do alt mapping
|
||||
existing.SetContext(merged)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
|
||||
|
||||
// states uses the standard comparator provided by the ATNState instance
|
||||
//
|
||||
states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
|
||||
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
states.Put(b.configs[i].GetState())
|
||||
}
|
||||
|
||||
return states
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) HasSemanticContext() bool {
|
||||
return b.hasSemanticContext
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
|
||||
b.hasSemanticContext = v
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
|
||||
preds := make([]SemanticContext, 0)
|
||||
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
c := b.configs[i].GetSemanticContext()
|
||||
|
||||
if c != SemanticContextNone {
|
||||
preds = append(preds, c)
|
||||
}
|
||||
}
|
||||
|
||||
return preds
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) GetItems() []ATNConfig {
|
||||
return b.configs
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
|
||||
if b.readOnly {
|
||||
panic("set is read-only")
|
||||
}
|
||||
|
||||
if b.configLookup.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
config := b.configs[i]
|
||||
|
||||
config.SetContext(interpreter.getCachedContext(config.GetContext()))
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
|
||||
for i := 0; i < len(coll); i++ {
|
||||
b.Add(coll[i], nil)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Compare is a hack function just to verify that adding DFAstares to the known
|
||||
// set works, so long as comparison of ATNConfigSet s works. For that to work, we
|
||||
// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
|
||||
// know the order, so we do this inefficient hack. If this proves the point, then
|
||||
// we can change the config set to a better structure.
|
||||
func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
|
||||
if len(b.configs) != len(bs.configs) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, c := range b.configs {
|
||||
found := false
|
||||
for _, c2 := range bs.configs {
|
||||
if c.Equals(c2) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
|
||||
if b == other {
|
||||
return true
|
||||
} else if _, ok := other.(*BaseATNConfigSet); !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
other2 := other.(*BaseATNConfigSet)
|
||||
|
||||
return b.configs != nil &&
|
||||
b.fullCtx == other2.fullCtx &&
|
||||
b.uniqueAlt == other2.uniqueAlt &&
|
||||
b.conflictingAlts == other2.conflictingAlts &&
|
||||
b.hasSemanticContext == other2.hasSemanticContext &&
|
||||
b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
|
||||
b.Compare(other2)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Hash() int {
|
||||
if b.readOnly {
|
||||
if b.cachedHash == -1 {
|
||||
b.cachedHash = b.hashCodeConfigs()
|
||||
}
|
||||
|
||||
return b.cachedHash
|
||||
}
|
||||
|
||||
return b.hashCodeConfigs()
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) hashCodeConfigs() int {
|
||||
h := 1
|
||||
for _, config := range b.configs {
|
||||
h = 31*h + config.Hash()
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Length() int {
|
||||
return len(b.configs)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) IsEmpty() bool {
|
||||
return len(b.configs) == 0
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
|
||||
if b.configLookup == nil {
|
||||
panic("not implemented for read-only sets")
|
||||
}
|
||||
|
||||
return b.configLookup.Contains(item)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
|
||||
if b.configLookup == nil {
|
||||
panic("not implemented for read-only sets")
|
||||
}
|
||||
|
||||
return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Clear() {
|
||||
if b.readOnly {
|
||||
panic("set is read-only")
|
||||
}
|
||||
|
||||
b.configs = make([]ATNConfig, 0)
|
||||
b.cachedHash = -1
|
||||
b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) FullContext() bool {
|
||||
return b.fullCtx
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
|
||||
return b.dipsIntoOuterContext
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
|
||||
b.dipsIntoOuterContext = v
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) GetUniqueAlt() int {
|
||||
return b.uniqueAlt
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
|
||||
b.uniqueAlt = v
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
|
||||
return b.conflictingAlts
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
|
||||
b.conflictingAlts = v
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) ReadOnly() bool {
|
||||
return b.readOnly
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
|
||||
b.readOnly = readOnly
|
||||
|
||||
if readOnly {
|
||||
b.configLookup = nil // Read only, so no need for the lookup cache
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) String() string {
|
||||
s := "["
|
||||
|
||||
for i, c := range b.configs {
|
||||
s += c.String()
|
||||
|
||||
if i != len(b.configs)-1 {
|
||||
s += ", "
|
||||
}
|
||||
}
|
||||
|
||||
s += "]"
|
||||
|
||||
if b.hasSemanticContext {
|
||||
s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
|
||||
}
|
||||
|
||||
if b.uniqueAlt != ATNInvalidAltNumber {
|
||||
s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
|
||||
}
|
||||
|
||||
if b.conflictingAlts != nil {
|
||||
s += ",conflictingAlts=" + b.conflictingAlts.String()
|
||||
}
|
||||
|
||||
if b.dipsIntoOuterContext {
|
||||
s += ",dipsIntoOuterContext"
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type OrderedATNConfigSet struct {
|
||||
*BaseATNConfigSet
|
||||
}
|
||||
|
||||
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
|
||||
b := NewBaseATNConfigSet(false)
|
||||
|
||||
// This set uses the standard Hash() and Equals() from ATNConfig
|
||||
b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
|
||||
|
||||
return &OrderedATNConfigSet{BaseATNConfigSet: b}
|
||||
}
|
||||
|
||||
func hashATNConfig(i interface{}) int {
|
||||
o := i.(ATNConfig)
|
||||
hash := 7
|
||||
hash = 31*hash + o.GetState().GetStateNumber()
|
||||
hash = 31*hash + o.GetAlt()
|
||||
hash = 31*hash + o.GetSemanticContext().Hash()
|
||||
return hash
|
||||
}
|
||||
|
||||
func equalATNConfigs(a, b interface{}) bool {
|
||||
if a == nil || b == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if a == b {
|
||||
return true
|
||||
}
|
||||
|
||||
var ai, ok = a.(ATNConfig)
|
||||
var bi, ok1 = b.(ATNConfig)
|
||||
|
||||
if !ok || !ok1 {
|
||||
return false
|
||||
}
|
||||
|
||||
if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
|
||||
return false
|
||||
}
|
||||
|
||||
if ai.GetAlt() != bi.GetAlt() {
|
||||
return false
|
||||
}
|
||||
|
||||
return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
|
||||
}
|
113
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
generated
vendored
113
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
generated
vendored
@@ -1,113 +0,0 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
type InputStream struct {
|
||||
name string
|
||||
index int
|
||||
data []rune
|
||||
size int
|
||||
}
|
||||
|
||||
func NewInputStream(data string) *InputStream {
|
||||
|
||||
is := new(InputStream)
|
||||
|
||||
is.name = "<empty>"
|
||||
is.index = 0
|
||||
is.data = []rune(data)
|
||||
is.size = len(is.data) // number of runes
|
||||
|
||||
return is
|
||||
}
|
||||
|
||||
func (is *InputStream) reset() {
|
||||
is.index = 0
|
||||
}
|
||||
|
||||
func (is *InputStream) Consume() {
|
||||
if is.index >= is.size {
|
||||
// assert is.LA(1) == TokenEOF
|
||||
panic("cannot consume EOF")
|
||||
}
|
||||
is.index++
|
||||
}
|
||||
|
||||
func (is *InputStream) LA(offset int) int {
|
||||
|
||||
if offset == 0 {
|
||||
return 0 // nil
|
||||
}
|
||||
if offset < 0 {
|
||||
offset++ // e.g., translate LA(-1) to use offset=0
|
||||
}
|
||||
pos := is.index + offset - 1
|
||||
|
||||
if pos < 0 || pos >= is.size { // invalid
|
||||
return TokenEOF
|
||||
}
|
||||
|
||||
return int(is.data[pos])
|
||||
}
|
||||
|
||||
func (is *InputStream) LT(offset int) int {
|
||||
return is.LA(offset)
|
||||
}
|
||||
|
||||
func (is *InputStream) Index() int {
|
||||
return is.index
|
||||
}
|
||||
|
||||
func (is *InputStream) Size() int {
|
||||
return is.size
|
||||
}
|
||||
|
||||
// mark/release do nothing we have entire buffer
|
||||
func (is *InputStream) Mark() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (is *InputStream) Release(marker int) {
|
||||
}
|
||||
|
||||
func (is *InputStream) Seek(index int) {
|
||||
if index <= is.index {
|
||||
is.index = index // just jump don't update stream state (line,...)
|
||||
return
|
||||
}
|
||||
// seek forward
|
||||
is.index = intMin(index, is.size)
|
||||
}
|
||||
|
||||
func (is *InputStream) GetText(start int, stop int) string {
|
||||
if stop >= is.size {
|
||||
stop = is.size - 1
|
||||
}
|
||||
if start >= is.size {
|
||||
return ""
|
||||
}
|
||||
|
||||
return string(is.data[start : stop+1])
|
||||
}
|
||||
|
||||
func (is *InputStream) GetTextFromTokens(start, stop Token) string {
|
||||
if start != nil && stop != nil {
|
||||
return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (is *InputStream) GetTextFromInterval(i *Interval) string {
|
||||
return is.GetText(i.Start, i.Stop)
|
||||
}
|
||||
|
||||
func (*InputStream) GetSourceName() string {
|
||||
return "Obtained from string"
|
||||
}
|
||||
|
||||
func (is *InputStream) String() string {
|
||||
return string(is.data)
|
||||
}
|
198
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
generated
vendored
198
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
generated
vendored
@@ -1,198 +0,0 @@
|
||||
package antlr
|
||||
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Collectable is an interface that a struct should implement if it is to be
|
||||
// usable as a key in these collections.
|
||||
type Collectable[T any] interface {
|
||||
Hash() int
|
||||
Equals(other Collectable[T]) bool
|
||||
}
|
||||
|
||||
type Comparator[T any] interface {
|
||||
Hash1(o T) int
|
||||
Equals2(T, T) bool
|
||||
}
|
||||
|
||||
// JStore implements a container that allows the use of a struct to calculate the key
|
||||
// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
|
||||
// serve the needs of the ANTLR Go runtime.
|
||||
//
|
||||
// For ease of porting the logic of the runtime from the master target (Java), this collection
|
||||
// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
|
||||
// function as the key. The values are stored in a standard go map which internally is a form of hashmap
|
||||
// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
|
||||
// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
|
||||
// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
|
||||
// we understand the requirements, then this is fine - this is not a general purpose collection.
|
||||
type JStore[T any, C Comparator[T]] struct {
|
||||
store map[int][]T
|
||||
len int
|
||||
comparator Comparator[T]
|
||||
}
|
||||
|
||||
func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {
|
||||
|
||||
if comparator == nil {
|
||||
panic("comparator cannot be nil")
|
||||
}
|
||||
|
||||
s := &JStore[T, C]{
|
||||
store: make(map[int][]T, 1),
|
||||
comparator: comparator,
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Put will store given value in the collection. Note that the key for storage is generated from
|
||||
// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
|
||||
// as any kind of general collection.
|
||||
//
|
||||
// If the key has a hash conflict, then the value will be added to the slice of values associated with the
|
||||
// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
|
||||
// tested by calling the equals() method on the key.
|
||||
//
|
||||
// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
|
||||
//
|
||||
// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
|
||||
func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn
|
||||
|
||||
kh := s.comparator.Hash1(value)
|
||||
|
||||
for _, v1 := range s.store[kh] {
|
||||
if s.comparator.Equals2(value, v1) {
|
||||
return v1, true
|
||||
}
|
||||
}
|
||||
s.store[kh] = append(s.store[kh], value)
|
||||
s.len++
|
||||
return value, false
|
||||
}
|
||||
|
||||
// Get will return the value associated with the key - the type of the key is the same type as the value
|
||||
// which would not generally be useful, but this is a specific thing for ANTLR where the key is
|
||||
// generated using the object we are going to store.
|
||||
func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn
|
||||
|
||||
kh := s.comparator.Hash1(key)
|
||||
|
||||
for _, v := range s.store[kh] {
|
||||
if s.comparator.Equals2(key, v) {
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
return key, false
|
||||
}
|
||||
|
||||
// Contains returns true if the given key is present in the store
|
||||
func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn
|
||||
|
||||
_, present := s.Get(key)
|
||||
return present
|
||||
}
|
||||
|
||||
func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
|
||||
vs := make([]T, 0, len(s.store))
|
||||
for _, v := range s.store {
|
||||
vs = append(vs, v...)
|
||||
}
|
||||
sort.Slice(vs, func(i, j int) bool {
|
||||
return less(vs[i], vs[j])
|
||||
})
|
||||
|
||||
return vs
|
||||
}
|
||||
|
||||
func (s *JStore[T, C]) Each(f func(T) bool) {
|
||||
for _, e := range s.store {
|
||||
for _, v := range e {
|
||||
f(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *JStore[T, C]) Len() int {
|
||||
return s.len
|
||||
}
|
||||
|
||||
func (s *JStore[T, C]) Values() []T {
|
||||
vs := make([]T, 0, len(s.store))
|
||||
for _, e := range s.store {
|
||||
for _, v := range e {
|
||||
vs = append(vs, v)
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
type entry[K, V any] struct {
|
||||
key K
|
||||
val V
|
||||
}
|
||||
|
||||
type JMap[K, V any, C Comparator[K]] struct {
|
||||
store map[int][]*entry[K, V]
|
||||
len int
|
||||
comparator Comparator[K]
|
||||
}
|
||||
|
||||
func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
|
||||
return &JMap[K, V, C]{
|
||||
store: make(map[int][]*entry[K, V], 1),
|
||||
comparator: comparator,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Put(key K, val V) {
|
||||
kh := m.comparator.Hash1(key)
|
||||
|
||||
m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
|
||||
m.len++
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Values() []V {
|
||||
vs := make([]V, 0, len(m.store))
|
||||
for _, e := range m.store {
|
||||
for _, v := range e {
|
||||
vs = append(vs, v.val)
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Get(key K) (V, bool) {
|
||||
|
||||
var none V
|
||||
kh := m.comparator.Hash1(key)
|
||||
for _, e := range m.store[kh] {
|
||||
if m.comparator.Equals2(e.key, key) {
|
||||
return e.val, true
|
||||
}
|
||||
}
|
||||
return none, false
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Len() int {
|
||||
return len(m.store)
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Delete(key K) {
|
||||
kh := m.comparator.Hash1(key)
|
||||
for i, e := range m.store[kh] {
|
||||
if m.comparator.Equals2(e.key, key) {
|
||||
m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
|
||||
m.len--
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Clear() {
|
||||
m.store = make(map[int][]*entry[K, V])
|
||||
}
|
806
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
generated
vendored
806
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
generated
vendored
@@ -1,806 +0,0 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"golang.org/x/exp/slices"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// BasePredictionContextEmptyReturnState represents {@code $} in local context
// prediction, which means wildcard: {@code $ + x = $}.
const (
	BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)
|
||||
|
||||
// In an array in full context mode, {@code $} doesn't mean wildcard:
// {@code $ + x = [$,x]}. Here, {@code $} = {@link //EmptyReturnState}.

// Package-level counters used to number prediction context nodes.
var (
	BasePredictionContextglobalNodeCount = 1
	BasePredictionContextid              = BasePredictionContextglobalNodeCount
)
|
||||
|
||||
// PredictionContext is implemented by every prediction context node kind
// (empty, singleton, and array contexts).
type PredictionContext interface {
	Hash() int                       // cached hash for use in collections
	Equals(interface{}) bool         // structural equality
	GetParent(int) PredictionContext // parent context at the given index
	getReturnState(int) int          // return state at the given index
	length() int                     // number of (parent, returnState) pairs
	isEmpty() bool                   // true only for the EMPTY context
	hasEmptyPath() bool              // true when the last return state is $
	String() string
}
|
||||
|
||||
// BasePredictionContext holds state shared by all prediction context
// implementations.
type BasePredictionContext struct {
	cachedHash int // hash precomputed at construction; contexts are immutable
}
|
||||
|
||||
func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
|
||||
pc := new(BasePredictionContext)
|
||||
pc.cachedHash = cachedHash
|
||||
|
||||
return pc
|
||||
}
|
||||
|
||||
// isEmpty reports whether this context is the EMPTY context. The base
// implementation always returns false; EmptyPredictionContext overrides it.
func (b *BasePredictionContext) isEmpty() bool {
	return false
}
|
||||
|
||||
// calculateHash computes the murmur hash of a singleton context from its
// parent's hash and its return state.
func calculateHash(parent PredictionContext, returnState int) int {
	h := murmurInit(1)
	h = murmurUpdate(h, parent.Hash())
	h = murmurUpdate(h, returnState)
	return murmurFinish(h, 2)
}
|
||||
|
||||
// _emptyPredictionContextHash is the murmur hash of the empty context,
// computed once at package init so calculateEmptyHash is a constant lookup.
var _emptyPredictionContextHash int

func init() {
	_emptyPredictionContextHash = murmurInit(1)
	_emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
}
|
||||
|
||||
// calculateEmptyHash returns the precomputed hash of the empty context.
func calculateEmptyHash() int {
	return _emptyPredictionContextHash
}
|
||||
|
||||
// PredictionContextCache caches PredictionContext objects. It is used for the
// shared context cache associated with contexts in DFA states, and can serve
// both lexers and parsers.
type PredictionContextCache struct {
	cache map[PredictionContext]PredictionContext
}
|
||||
|
||||
func NewPredictionContextCache() *PredictionContextCache {
|
||||
t := new(PredictionContextCache)
|
||||
t.cache = make(map[PredictionContext]PredictionContext)
|
||||
return t
|
||||
}
|
||||
|
||||
// Add a context to the cache and return it. If the context already exists,
|
||||
// return that one instead and do not add a Newcontext to the cache.
|
||||
// Protect shared cache from unsafe thread access.
|
||||
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
|
||||
if ctx == BasePredictionContextEMPTY {
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
existing := p.cache[ctx]
|
||||
if existing != nil {
|
||||
return existing
|
||||
}
|
||||
p.cache[ctx] = ctx
|
||||
return ctx
|
||||
}
|
||||
|
||||
// Get returns the cached instance for ctx, or nil if it is not cached.
func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
	return p.cache[ctx]
}
|
||||
|
||||
// length returns the number of contexts currently cached.
func (p *PredictionContextCache) length() int {
	return len(p.cache)
}
|
||||
|
||||
// SingletonPredictionContext marks prediction contexts that carry exactly one
// (parent, returnState) pair.
type SingletonPredictionContext interface {
	PredictionContext
}
|
||||
|
||||
// BaseSingletonPredictionContext is a prediction context with exactly one
// parent and one return state.
type BaseSingletonPredictionContext struct {
	*BasePredictionContext

	parentCtx   PredictionContext // invoking context; nil for the empty context
	returnState int               // ATN state number to return to
}
|
||||
|
||||
func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
|
||||
var cachedHash int
|
||||
if parent != nil {
|
||||
cachedHash = calculateHash(parent, returnState)
|
||||
} else {
|
||||
cachedHash = calculateEmptyHash()
|
||||
}
|
||||
|
||||
s := new(BaseSingletonPredictionContext)
|
||||
s.BasePredictionContext = NewBasePredictionContext(cachedHash)
|
||||
|
||||
s.parentCtx = parent
|
||||
s.returnState = returnState
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
|
||||
if returnState == BasePredictionContextEmptyReturnState && parent == nil {
|
||||
// someone can pass in the bits of an array ctx that mean $
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
|
||||
return NewBaseSingletonPredictionContext(parent, returnState)
|
||||
}
|
||||
|
||||
// length returns 1: a singleton context has exactly one pair.
func (b *BaseSingletonPredictionContext) length() int {
	return 1
}
|
||||
|
||||
// GetParent returns the single parent context; index is ignored.
func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
	return b.parentCtx
}
|
||||
|
||||
// getReturnState returns the single return state; index is ignored.
func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
	return b.returnState
}
|
||||
|
||||
// hasEmptyPath reports whether the return state is $ (the empty return state).
func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
	return b.returnState == BasePredictionContextEmptyReturnState
}
|
||||
|
||||
// Hash returns the hash precomputed at construction time.
func (b *BaseSingletonPredictionContext) Hash() int {
	return b.cachedHash
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
|
||||
if b == other {
|
||||
return true
|
||||
}
|
||||
if _, ok := other.(*BaseSingletonPredictionContext); !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
otherP := other.(*BaseSingletonPredictionContext)
|
||||
|
||||
if b.returnState != otherP.getReturnState(0) {
|
||||
return false
|
||||
}
|
||||
if b.parentCtx == nil {
|
||||
return otherP.parentCtx == nil
|
||||
}
|
||||
|
||||
return b.parentCtx.Equals(otherP.parentCtx)
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) String() string {
|
||||
var up string
|
||||
|
||||
if b.parentCtx == nil {
|
||||
up = ""
|
||||
} else {
|
||||
up = b.parentCtx.String()
|
||||
}
|
||||
|
||||
if len(up) == 0 {
|
||||
if b.returnState == BasePredictionContextEmptyReturnState {
|
||||
return "$"
|
||||
}
|
||||
|
||||
return strconv.Itoa(b.returnState)
|
||||
}
|
||||
|
||||
return strconv.Itoa(b.returnState) + " " + up
|
||||
}
|
||||
|
||||
// BasePredictionContextEMPTY is the shared empty-context instance ($).
var BasePredictionContextEMPTY = NewEmptyPredictionContext()
|
||||
|
||||
// EmptyPredictionContext represents $: a singleton context with no parent and
// the empty return state.
type EmptyPredictionContext struct {
	*BaseSingletonPredictionContext
}
|
||||
|
||||
func NewEmptyPredictionContext() *EmptyPredictionContext {
|
||||
|
||||
p := new(EmptyPredictionContext)
|
||||
|
||||
p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
|
||||
p.cachedHash = calculateEmptyHash()
|
||||
return p
|
||||
}
|
||||
|
||||
// isEmpty always reports true: this is the EMPTY context.
func (e *EmptyPredictionContext) isEmpty() bool {
	return true
}
|
||||
|
||||
// GetParent returns nil: the empty context has no parent.
func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
	return nil
}
|
||||
|
||||
// getReturnState returns the empty return state; index is ignored.
func (e *EmptyPredictionContext) getReturnState(index int) int {
	return e.returnState
}
|
||||
|
||||
// Hash returns the precomputed empty-context hash.
func (e *EmptyPredictionContext) Hash() int {
	return e.cachedHash
}
|
||||
|
||||
// Equals compares by identity: only the same instance is equal to EMPTY.
func (e *EmptyPredictionContext) Equals(other interface{}) bool {
	return e == other
}
|
||||
|
||||
// String renders the empty context as "$".
func (e *EmptyPredictionContext) String() string {
	return "$"
}
|
||||
|
||||
// ArrayPredictionContext is a prediction context with multiple
// (parent, returnState) pairs. Return states are kept sorted, with the empty
// return state ($) only ever in the last position.
type ArrayPredictionContext struct {
	*BasePredictionContext

	parents      []PredictionContext // parent for each return state, parallel to returnStates
	returnStates []int               // sorted return states
}
|
||||
|
||||
func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
|
||||
// Parent can be nil only if full ctx mode and we make an array
|
||||
// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
|
||||
// nil parent and
|
||||
// returnState == {@link //EmptyReturnState}.
|
||||
hash := murmurInit(1)
|
||||
|
||||
for _, parent := range parents {
|
||||
hash = murmurUpdate(hash, parent.Hash())
|
||||
}
|
||||
|
||||
for _, returnState := range returnStates {
|
||||
hash = murmurUpdate(hash, returnState)
|
||||
}
|
||||
|
||||
hash = murmurFinish(hash, len(parents)<<1)
|
||||
|
||||
c := new(ArrayPredictionContext)
|
||||
c.BasePredictionContext = NewBasePredictionContext(hash)
|
||||
|
||||
c.parents = parents
|
||||
c.returnStates = returnStates
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// GetReturnStates returns the context's return-state slice (not a copy).
func (a *ArrayPredictionContext) GetReturnStates() []int {
	return a.returnStates
}
|
||||
|
||||
// hasEmptyPath reports whether the last return state is $; $ can only appear
// in the final position.
func (a *ArrayPredictionContext) hasEmptyPath() bool {
	return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
}
|
||||
|
||||
// isEmpty reports whether this array context represents only $.
func (a *ArrayPredictionContext) isEmpty() bool {
	// since EmptyReturnState can only appear in the last position, we
	// don't need to verify that size==1
	return a.returnStates[0] == BasePredictionContextEmptyReturnState
}
|
||||
|
||||
// length returns the number of (parent, returnState) pairs.
func (a *ArrayPredictionContext) length() int {
	return len(a.returnStates)
}
|
||||
|
||||
// GetParent returns the parent context at the given index.
func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
	return a.parents[index]
}
|
||||
|
||||
// getReturnState returns the return state at the given index.
func (a *ArrayPredictionContext) getReturnState(index int) int {
	return a.returnStates[index]
}
|
||||
|
||||
// Equals is the default comparison function for ArrayPredictionContext when
// no specialized implementation is needed for a collection.
func (a *ArrayPredictionContext) Equals(o interface{}) bool {
	if a == o {
		return true
	}
	other, ok := o.(*ArrayPredictionContext)
	if !ok {
		return false
	}
	if a.cachedHash != other.Hash() {
		return false // can't be same if hash is different
	}

	// Must compare the actual array elements and not just the array address:
	// return states by value, parents by their Equals method.
	return slices.Equal(a.returnStates, other.returnStates) &&
		slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
			return x.Equals(y)
		})
}
|
||||
|
||||
// Hash is the default hash function for ArrayPredictionContext when no
// specialized implementation is needed for a collection; it returns the hash
// precomputed at construction.
func (a *ArrayPredictionContext) Hash() int {
	return a.BasePredictionContext.cachedHash
}
|
||||
|
||||
func (a *ArrayPredictionContext) String() string {
|
||||
if a.isEmpty() {
|
||||
return "[]"
|
||||
}
|
||||
|
||||
s := "["
|
||||
for i := 0; i < len(a.returnStates); i++ {
|
||||
if i > 0 {
|
||||
s = s + ", "
|
||||
}
|
||||
if a.returnStates[i] == BasePredictionContextEmptyReturnState {
|
||||
s = s + "$"
|
||||
continue
|
||||
}
|
||||
s = s + strconv.Itoa(a.returnStates[i])
|
||||
if a.parents[i] != nil {
|
||||
s = s + " " + a.parents[i].String()
|
||||
} else {
|
||||
s = s + "nil"
|
||||
}
|
||||
}
|
||||
|
||||
return s + "]"
|
||||
}
|
||||
|
||||
// predictionContextFromRuleContext converts a RuleContext tree to a
// PredictionContext graph. Returns EMPTY if outerContext is empty or nil.
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
	if outerContext == nil {
		outerContext = ParserRuleContextEmpty
	}
	// if we are in RuleContext of start rule, s, then BasePredictionContext
	// is EMPTY. Nobody called us. (if we are empty, return empty)
	if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
		return BasePredictionContextEMPTY
	}
	// If we have a parent, convert it to a BasePredictionContext graph by
	// recursing up the invocation chain.
	parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
	state := a.states[outerContext.GetInvokingState()]
	transition := state.GetTransitions()[0]

	// The return state is the state following the rule invocation.
	return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}
|
||||
|
||||
// merge combines two prediction contexts, sharing existing graph structure
// where possible. rootIsWildcard selects local-context ($ acts as wildcard)
// versus full-context semantics; mergeCache, when non-nil, memoizes results.
func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {

	// Share same graph if both same
	//
	if a == b || a.Equals(b) {
		return a
	}

	// In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
	// in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
	// from it.
	// In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
	// will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
	// either of them.

	ac, ok1 := a.(*BaseSingletonPredictionContext)
	bc, ok2 := b.(*BaseSingletonPredictionContext)

	if ok1 && ok2 {
		return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
	}
	// At least one of a or b is array.
	// If one is $ and rootIsWildcard, return $ as wildcard.
	if rootIsWildcard {
		if _, ok := a.(*EmptyPredictionContext); ok {
			return a
		}
		if _, ok := b.(*EmptyPredictionContext); ok {
			return b
		}
	}

	// Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters
	// here.
	//
	// TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here

	var arp, arb *ArrayPredictionContext
	var ok bool
	if arp, ok = a.(*ArrayPredictionContext); ok {
	} else if _, ok = a.(*BaseSingletonPredictionContext); ok {
		arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
	} else if _, ok = a.(*EmptyPredictionContext); ok {
		arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
	}

	if arb, ok = b.(*ArrayPredictionContext); ok {
	} else if _, ok = b.(*BaseSingletonPredictionContext); ok {
		arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
	} else if _, ok = b.(*EmptyPredictionContext); ok {
		arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
	}

	// Both arp and arb are now arrays.
	return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
}
|
||||
|
||||
// Merge two {@link SingletonBasePredictionContext} instances.
|
||||
//
|
||||
// <p>Stack tops equal, parents merge is same return left graph.<br>
|
||||
// <embed src="images/SingletonMerge_SameRootSamePar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Same stack top, parents differ merge parents giving array node, then
|
||||
// remainders of those graphs. A Newroot node is created to point to the
|
||||
// merged parents.<br>
|
||||
// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Different stack tops pointing to same parent. Make array node for the
|
||||
// root where both element in the root point to the same (original)
|
||||
// parent.<br>
|
||||
// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Different stack tops pointing to different parents. Make array node for
|
||||
// the root where each element points to the corresponding original
|
||||
// parent.<br>
|
||||
// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// @param a the first {@link SingletonBasePredictionContext}
|
||||
// @param b the second {@link SingletonBasePredictionContext}
|
||||
// @param rootIsWildcard {@code true} if this is a local-context merge,
|
||||
// otherwise false to indicate a full-context merge
|
||||
// @param mergeCache
|
||||
// /
|
||||
// mergeSingletons merges two singleton contexts per the diagrams in the
// comment above. The merge cache, when non-nil, is checked in both key
// orders before computing and updated with the result.
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	if mergeCache != nil {
		previous := mergeCache.Get(a.Hash(), b.Hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.Hash(), a.Hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
	}

	// Handle the cases where one side is the EMPTY context first.
	rootMerge := mergeRoot(a, b, rootIsWildcard)
	if rootMerge != nil {
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), rootMerge)
		}
		return rootMerge
	}
	if a.returnState == b.returnState {
		parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
		// if parent is same as existing a or b parent or reduced to a parent,
		// return it
		if parent == a.parentCtx {
			return a // ax + bx = ax, if a=b
		}
		if parent == b.parentCtx {
			return b // ax + bx = bx, if a=b
		}
		// else: ax + ay = a'[x,y]
		// merge parents x and y, giving array node with x,y then remainders
		// of those graphs. dup a, a' points at merged array.
		// New joined parent so create new singleton pointing to it, a'
		spc := SingletonBasePredictionContextCreate(parent, a.returnState)
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), spc)
		}
		return spc
	}
	// a != b payloads differ
	// see if we can collapse parents due to $+x parents if local ctx
	var singleParent PredictionContext
	if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) {
		// ax + bx = [a,b]x
		singleParent = a.parentCtx
	}
	if singleParent != nil { // parents are same
		// sort payloads and use same parent
		payloads := []int{a.returnState, b.returnState}
		if a.returnState > b.returnState {
			payloads[0] = b.returnState
			payloads[1] = a.returnState
		}
		parents := []PredictionContext{singleParent, singleParent}
		apc := NewArrayPredictionContext(parents, payloads)
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), apc)
		}
		return apc
	}
	// parents differ and can't merge them. Just pack together
	// into array; can't merge.
	// ax + by = [ax,by]
	payloads := []int{a.returnState, b.returnState}
	parents := []PredictionContext{a.parentCtx, b.parentCtx}
	if a.returnState > b.returnState { // sort by payload
		payloads[0] = b.returnState
		payloads[1] = a.returnState
		parents = []PredictionContext{b.parentCtx, a.parentCtx}
	}
	apc := NewArrayPredictionContext(parents, payloads)
	if mergeCache != nil {
		mergeCache.set(a.Hash(), b.Hash(), apc)
	}
	return apc
}
|
||||
|
||||
// Handle case where at least one of {@code a} or {@code b} is
|
||||
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
|
||||
// to represent {@link //EMPTY}.
|
||||
//
|
||||
// <h2>Local-Context Merges</h2>
|
||||
//
|
||||
// <p>These local-context merge operations are used when {@code rootIsWildcard}
|
||||
// is true.</p>
|
||||
//
|
||||
// <p>{@link //EMPTY} is superset of any graph return {@link //EMPTY}.<br>
|
||||
// <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
|
||||
// {@code //EMPTY} return left graph.<br>
|
||||
// <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Special case of last merge if local context.<br>
|
||||
// <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <h2>Full-Context Merges</h2>
|
||||
//
|
||||
// <p>These full-context merge operations are used when {@code rootIsWildcard}
|
||||
// is false.</p>
|
||||
//
|
||||
// <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Must keep all contexts {@link //EMPTY} in array is a special value (and
|
||||
// nil parent).<br>
|
||||
// <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// @param a the first {@link SingletonBasePredictionContext}
|
||||
// @param b the second {@link SingletonBasePredictionContext}
|
||||
// @param rootIsWildcard {@code true} if this is a local-context merge,
|
||||
// otherwise false to indicate a full-context merge
|
||||
// /
|
||||
// mergeRoot handles the case where at least one of a or b is the EMPTY
// context. With rootIsWildcard (local-context merge) EMPTY absorbs anything;
// in full-context mode $ is kept: $+$ = $ and $+x = [$,x], where $ is encoded
// as a nil parent with the empty return state. Returns nil when neither side
// is EMPTY so the caller falls through to the general merge.
func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
	if rootIsWildcard {
		if a == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // $ + b = $
		}
		if b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // a + $ = $
		}
	} else {
		if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // $ + $ = $
		} else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
			payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{b.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		} else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always last if present)
			payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{a.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		}
	}
	return nil
}
|
||||
|
||||
// Merge two {@link ArrayBasePredictionContext} instances.
|
||||
//
|
||||
// <p>Different tops, different parents.<br>
|
||||
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Shared top, same parents.<br>
|
||||
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Shared top, different parents.<br>
|
||||
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Shared top, all shared parents.<br>
|
||||
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Equal tops, merge parents and reduce top to
|
||||
// {@link SingletonBasePredictionContext}.<br>
|
||||
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
|
||||
// /
|
||||
// mergeArrays merges two ArrayPredictionContext instances by merge-sorting
// their sorted return-state arrays, merging parents where return states
// collide, trimming the result, and reusing a or b when possible. Results
// are memoized in mergeCache when it is non-nil.
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	if mergeCache != nil {
		previous := mergeCache.Get(a.Hash(), b.Hash())
		if previous != nil {
			if ParserATNSimulatorTraceATNSim {
				fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
			}
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.Hash(), a.Hash())
		if previous != nil {
			if ParserATNSimulatorTraceATNSim {
				fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
			}
			return previous.(PredictionContext)
		}
	}
	// merge sorted payloads a + b => M
	i := 0 // walks a
	j := 0 // walks b
	k := 0 // walks target M array

	mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
	mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
	// walk and merge to yield mergedParents, mergedReturnStates
	for i < len(a.returnStates) && j < len(b.returnStates) {
		aParent := a.parents[i]
		bParent := b.parents[j]
		if a.returnStates[i] == b.returnStates[j] {
			// same payload (stack tops are equal), must yield merged singleton
			payload := a.returnStates[i]
			// $+$ = $
			bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
			axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax -> ax
			if bothDollars || axAX {
				mergedParents[k] = aParent // choose left
				mergedReturnStates[k] = payload
			} else { // ax+ay -> a'[x,y]
				mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
				mergedParents[k] = mergedParent
				mergedReturnStates[k] = payload
			}
			i++ // hop over left one as usual
			j++ // but also Skip one in right side since we merge
		} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
			mergedParents[k] = aParent
			mergedReturnStates[k] = a.returnStates[i]
			i++
		} else { // b > a, copy b[j] to M
			mergedParents[k] = bParent
			mergedReturnStates[k] = b.returnStates[j]
			j++
		}
		k++
	}
	// copy over any payloads remaining in either array
	if i < len(a.returnStates) {
		for p := i; p < len(a.returnStates); p++ {
			mergedParents[k] = a.parents[p]
			mergedReturnStates[k] = a.returnStates[p]
			k++
		}
	} else {
		for p := j; p < len(b.returnStates); p++ {
			mergedParents[k] = b.parents[p]
			mergedReturnStates[k] = b.returnStates[p]
			k++
		}
	}
	// trim merged if we combined a few that had same stack tops
	if k < len(mergedParents) { // write index < last position; trim
		if k == 1 { // for just one merged element, return singleton top
			pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
			if mergeCache != nil {
				mergeCache.set(a.Hash(), b.Hash(), pc)
			}
			return pc
		}
		mergedParents = mergedParents[0:k]
		mergedReturnStates = mergedReturnStates[0:k]
	}

	M := NewArrayPredictionContext(mergedParents, mergedReturnStates)

	// if we created same array as a or b, return that instead
	// TODO: track whether this is possible above during merge sort for speed
	// TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
	if M == a {
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), a)
		}
		if ParserATNSimulatorTraceATNSim {
			fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
		}
		return a
	}
	if M == b {
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), b)
		}
		if ParserATNSimulatorTraceATNSim {
			fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
		}
		return b
	}
	// Collapse identical parents in M to shared instances.
	combineCommonParents(mergedParents)

	if mergeCache != nil {
		mergeCache.set(a.Hash(), b.Hash(), M)
	}
	if ParserATNSimulatorTraceATNSim {
		fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
	}
	return M
}
|
||||
|
||||
// Make pass over all <em>M</em> {@code parents} merge any {@code equals()}
|
||||
// ones.
|
||||
// /
|
||||
func combineCommonParents(parents []PredictionContext) {
|
||||
uniqueParents := make(map[PredictionContext]PredictionContext)
|
||||
|
||||
for p := 0; p < len(parents); p++ {
|
||||
parent := parents[p]
|
||||
if uniqueParents[parent] == nil {
|
||||
uniqueParents[parent] = parent
|
||||
}
|
||||
}
|
||||
for q := 0; q < len(parents); q++ {
|
||||
parents[q] = uniqueParents[parents[q]]
|
||||
}
|
||||
}
|
||||
|
||||
// getCachedBasePredictionContext returns the canonical cached form of
// context, rebuilding it when any of its parents canonicalize to a different
// instance. visited memoizes results within one top-level call so shared
// sub-graphs are walked only once.
func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {

	if context.isEmpty() {
		return context
	}
	existing := visited[context]
	if existing != nil {
		return existing
	}
	existing = contextCache.Get(context)
	if existing != nil {
		visited[context] = existing
		return existing
	}
	// Canonicalize the parents recursively; copy the parent slice lazily,
	// only the first time a parent actually changes.
	changed := false
	parents := make([]PredictionContext, context.length())
	for i := 0; i < len(parents); i++ {
		parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
		if changed || parent != context.GetParent(i) {
			if !changed {
				parents = make([]PredictionContext, context.length())
				for j := 0; j < context.length(); j++ {
					parents[j] = context.GetParent(j)
				}
				changed = true
			}
			parents[i] = parent
		}
	}
	if !changed {
		// All parents were already canonical: cache and return the original.
		contextCache.add(context)
		visited[context] = context
		return context
	}
	// Rebuild the context around the canonicalized parents, choosing the
	// representation by pair count.
	var updated PredictionContext
	if len(parents) == 0 {
		updated = BasePredictionContextEMPTY
	} else if len(parents) == 1 {
		updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
	} else {
		updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
	}
	contextCache.add(updated)
	visited[updated] = updated
	visited[context] = updated

	return updated
}
|
529
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
generated
vendored
529
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
generated
vendored
@@ -1,529 +0,0 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
// This enumeration defines the prediction modes available in ANTLR 4 along
// with utility methods for analyzing configuration sets for conflicts and/or
// ambiguities.
const (
	// PredictionModeSLL is the SLL(*) prediction mode. It ignores the current
	// parser context when making predictions. This is the fastest prediction
	// mode and provides correct results for many grammars; it is more
	// powerful than ANTLR 3's prediction, but may report a syntax error for
	// grammar/input combinations that are not SLL even when LL prediction
	// would succeed. No guarantees are made for syntactically-incorrect
	// inputs.
	PredictionModeSLL = 0
	// PredictionModeLL is the LL(*) prediction mode. It allows the current
	// parser context to be used for resolving SLL conflicts that occur during
	// prediction. This is the fastest prediction mode that guarantees correct
	// parse results for all combinations of grammars with syntactically
	// correct inputs; for truly ambiguous grammars it might not report
	// exactly which alternatives are ambiguous. No guarantees are made for
	// syntactically-incorrect inputs.
	PredictionModeLL = 1
	// PredictionModeLLExactAmbigDetection is LL(*) with exact ambiguity
	// detection. In addition to LL's correctness guarantees, it instructs the
	// prediction algorithm to determine the complete and exact set of
	// ambiguous alternatives for every ambiguous decision encountered. Useful
	// for diagnosing ambiguities during grammar development; due to the
	// overhead of computing ambiguity sets, avoid it when exact results are
	// not necessary. No guarantees are made for syntactically-incorrect
	// inputs.
	PredictionModeLLExactAmbigDetection = 2
)
|
||||
|
||||
// Computes the SLL prediction termination condition.
|
||||
//
|
||||
// <p>
|
||||
// This method computes the SLL prediction termination condition for both of
|
||||
// the following cases.</p>
|
||||
//
|
||||
// <ul>
|
||||
// <li>The usual SLL+LL fallback upon SLL conflict</li>
|
||||
// <li>Pure SLL without LL fallback</li>
|
||||
// </ul>
|
||||
//
|
||||
// <p><strong>COMBINED SLL+LL PARSING</strong></p>
|
||||
//
|
||||
// <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
|
||||
// ensured regardless of how the termination condition is computed by this
|
||||
// method. Due to the substantially higher cost of LL prediction, the
|
||||
// prediction should only fall back to LL when the additional lookahead
|
||||
// cannot lead to a unique SLL prediction.</p>
|
||||
//
|
||||
// <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
|
||||
// conflicting subsets should fall back to full LL, even if the
|
||||
// configuration sets don't resolve to the same alternative (e.g.
|
||||
// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
|
||||
// configuration, SLL could continue with the hopes that more lookahead will
|
||||
// resolve via one of those non-conflicting configurations.</p>
|
||||
//
|
||||
// <p>Here's the prediction termination rule then: SLL (for SLL+LL parsing)
|
||||
// stops when it sees only conflicting configuration subsets. In contrast,
|
||||
// full LL keeps going when there is uncertainty.</p>
|
||||
//
|
||||
// <p><strong>HEURISTIC</strong></p>
|
||||
//
|
||||
// <p>As a heuristic, we stop prediction when we see any conflicting subset
|
||||
// unless we see a state that only has one alternative associated with it.
|
||||
// The single-alt-state thing lets prediction continue upon rules like
|
||||
// (otherwise, it would admit defeat too soon):</p>
|
||||
//
|
||||
// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>
|
||||
//
|
||||
// <p>When the ATN simulation reaches the state before {@code ';'}, it has a
|
||||
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
|
||||
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
|
||||
// processing this node because alternative two has another way to continue,
|
||||
// via {@code [6|2|[]]}.</p>
|
||||
//
|
||||
// <p>It also let's us continue for this rule:</p>
|
||||
//
|
||||
// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
|
||||
//
|
||||
// <p>After Matching input A, we reach the stop state for rule A, state 1.
|
||||
// State 8 is the state right before B. Clearly alternatives 1 and 2
|
||||
// conflict and no amount of further lookahead will separate the two.
|
||||
// However, alternative 3 will be able to continue and so we do not stop
|
||||
// working on this state. In the previous example, we're concerned with
|
||||
// states associated with the conflicting alternatives. Here alt 3 is not
|
||||
// associated with the conflicting configs, but since we can continue
|
||||
// looking for input reasonably, don't declare the state done.</p>
|
||||
//
|
||||
// <p><strong>PURE SLL PARSING</strong></p>
|
||||
//
|
||||
// <p>To handle pure SLL parsing, all we have to do is make sure that we
|
||||
// combine stack contexts for configurations that differ only by semantic
|
||||
// predicate. From there, we can do the usual SLL termination heuristic.</p>
|
||||
//
|
||||
// <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
|
||||
//
|
||||
// <p>SLL decisions don't evaluate predicates until after they reach DFA stop
|
||||
// states because they need to create the DFA cache that works in all
|
||||
// semantic situations. In contrast, full LL evaluates predicates collected
|
||||
// during start state computation so it can ignore predicates thereafter.
|
||||
// This means that SLL termination detection can totally ignore semantic
|
||||
// predicates.</p>
|
||||
//
|
||||
// <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
|
||||
// semantic predicate contexts so we might see two configurations like the
|
||||
// following.</p>
|
||||
//
|
||||
// <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
|
||||
//
|
||||
// <p>Before testing these configurations against others, we have to merge
|
||||
// {@code x} and {@code x'} (without modifying the existing configurations).
|
||||
// For example, we test {@code (x+x')==x”} when looking for conflicts in
|
||||
// the following configurations.</p>
|
||||
//
|
||||
// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}</p>
|
||||
//
|
||||
// <p>If the configuration set has predicates (as indicated by
|
||||
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
|
||||
// the configurations to strip out all of the predicates so that a standard
|
||||
// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
|
||||
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
|
||||
// Configs in rule stop states indicate reaching the end of the decision
|
||||
// rule (local context) or end of start rule (full context). If all
|
||||
// configs meet this condition, then none of the configurations is able
|
||||
// to Match additional input so we terminate prediction.
|
||||
//
|
||||
if PredictionModeallConfigsInRuleStopStates(configs) {
|
||||
return true
|
||||
}
|
||||
// pure SLL mode parsing
|
||||
if mode == PredictionModeSLL {
|
||||
// Don't bother with combining configs from different semantic
|
||||
// contexts if we can fail over to full LL costs more time
|
||||
// since we'll often fail over anyway.
|
||||
if configs.HasSemanticContext() {
|
||||
// dup configs, tossing out semantic predicates
|
||||
dup := NewBaseATNConfigSet(false)
|
||||
for _, c := range configs.GetItems() {
|
||||
|
||||
// NewBaseATNConfig({semanticContext:}, c)
|
||||
c = NewBaseATNConfig2(c, SemanticContextNone)
|
||||
dup.Add(c, nil)
|
||||
}
|
||||
configs = dup
|
||||
}
|
||||
// now we have combined contexts for configs with dissimilar preds
|
||||
}
|
||||
// pure SLL or combined SLL+LL mode parsing
|
||||
altsets := PredictionModegetConflictingAltSubsets(configs)
|
||||
return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
|
||||
}
|
||||
|
||||
// Checks if any configuration in {@code configs} is in a
|
||||
// {@link RuleStopState}. Configurations meeting this condition have reached
|
||||
// the end of the decision rule (local context) or end of start rule (full
|
||||
// context).
|
||||
//
|
||||
// @param configs the configuration set to test
|
||||
// @return {@code true} if any configuration in {@code configs} is in a
|
||||
// {@link RuleStopState}, otherwise {@code false}
|
||||
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
|
||||
for _, c := range configs.GetItems() {
|
||||
if _, ok := c.GetState().(*RuleStopState); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks if all configurations in {@code configs} are in a
|
||||
// {@link RuleStopState}. Configurations meeting this condition have reached
|
||||
// the end of the decision rule (local context) or end of start rule (full
|
||||
// context).
|
||||
//
|
||||
// @param configs the configuration set to test
|
||||
// @return {@code true} if all configurations in {@code configs} are in a
|
||||
// {@link RuleStopState}, otherwise {@code false}
|
||||
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
|
||||
|
||||
for _, c := range configs.GetItems() {
|
||||
if _, ok := c.GetState().(*RuleStopState); !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Full LL prediction termination.
|
||||
//
|
||||
// <p>Can we stop looking ahead during ATN simulation or is there some
|
||||
// uncertainty as to which alternative we will ultimately pick, after
|
||||
// consuming more input? Even if there are partial conflicts, we might know
|
||||
// that everything is going to resolve to the same minimum alternative. That
|
||||
// means we can stop since no more lookahead will change that fact. On the
|
||||
// other hand, there might be multiple conflicts that resolve to different
|
||||
// minimums. That means we need more look ahead to decide which of those
|
||||
// alternatives we should predict.</p>
|
||||
//
|
||||
// <p>The basic idea is to split the set of configurations {@code C}, into
|
||||
// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
|
||||
// non-conflicting configurations. Two configurations conflict if they have
|
||||
// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
|
||||
// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
|
||||
// and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
|
||||
//
|
||||
// <p>Reduce these configuration subsets to the set of possible alternatives.
|
||||
// You can compute the alternative subsets in one pass as follows:</p>
|
||||
//
|
||||
// <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
|
||||
// {@code C} holding {@code s} and {@code ctx} fixed.</p>
|
||||
//
|
||||
// <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
|
||||
//
|
||||
// <pre>
|
||||
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
|
||||
// alt and not pred
|
||||
// </pre>
|
||||
//
|
||||
// <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
|
||||
//
|
||||
// <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
|
||||
// {@code s} and {@code ctx}.</p>
|
||||
//
|
||||
// <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
|
||||
// the union of these alternative subsets is a singleton, then no amount of
|
||||
// more lookahead will help us. We will always pick that alternative. If,
|
||||
// however, there is more than one alternative, then we are uncertain which
|
||||
// alternative to predict and must continue looking for resolution. We may
|
||||
// or may not discover an ambiguity in the future, even if there are no
|
||||
// conflicting subsets this round.</p>
|
||||
//
|
||||
// <p>The biggest sin is to terminate early because it means we've made a
|
||||
// decision but were uncertain as to the eventual outcome. We haven't used
|
||||
// enough lookahead. On the other hand, announcing a conflict too late is no
|
||||
// big deal you will still have the conflict. It's just inefficient. It
|
||||
// might even look until the end of file.</p>
|
||||
//
|
||||
// <p>No special consideration for semantic predicates is required because
|
||||
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
|
||||
// no configuration contains a semantic context during the termination
|
||||
// check.</p>
|
||||
//
|
||||
// <p><strong>CONFLICTING CONFIGS</strong></p>
|
||||
//
|
||||
// <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
|
||||
// when {@code i!=j} but {@code x=x'}. Because we merge all
|
||||
// {@code (s, i, _)} configurations together, that means that there are at
|
||||
// most {@code n} configurations associated with state {@code s} for
|
||||
// {@code n} possible alternatives in the decision. The merged stacks
|
||||
// complicate the comparison of configuration contexts {@code x} and
|
||||
// {@code x'}. Sam checks to see if one is a subset of the other by calling
|
||||
// merge and checking to see if the merged result is either {@code x} or
|
||||
// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
|
||||
// is the superset, then {@code i} is the only possible prediction since the
|
||||
// others resolve to {@code min(i)} as well. However, if {@code x} is
|
||||
// associated with {@code j>i} then at least one stack configuration for
|
||||
// {@code j} is not in conflict with alternative {@code i}. The algorithm
|
||||
// should keep going, looking for more lookahead due to the uncertainty.</p>
|
||||
//
|
||||
// <p>For simplicity, I'm doing a equality check between {@code x} and
|
||||
// {@code x'} that lets the algorithm continue to consume lookahead longer
|
||||
// than necessary. The reason I like the equality is of course the
|
||||
// simplicity but also because that is the test you need to detect the
|
||||
// alternatives that are actually in conflict.</p>
|
||||
//
|
||||
// <p><strong>CONTINUE/STOP RULE</strong></p>
|
||||
//
|
||||
// <p>Continue if union of resolved alternative sets from non-conflicting and
|
||||
// conflicting alternative subsets has more than one alternative. We are
|
||||
// uncertain about which alternative to predict.</p>
|
||||
//
|
||||
// <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
|
||||
// alternatives are still in the running for the amount of input we've
|
||||
// consumed at this point. The conflicting sets let us to strip away
|
||||
// configurations that won't lead to more states because we resolve
|
||||
// conflicts to the configuration with a minimum alternate for the
|
||||
// conflicting set.</p>
|
||||
//
|
||||
// <p><strong>CASES</strong></p>
|
||||
//
|
||||
// <ul>
|
||||
//
|
||||
// <li>no conflicts and more than 1 alternative in set => continue</li>
|
||||
//
|
||||
// <li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
|
||||
// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
|
||||
// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
|
||||
// {@code {1,3}} => continue
|
||||
// </li>
|
||||
//
|
||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
|
||||
// {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set
|
||||
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
|
||||
// {@code {1}} => stop and predict 1</li>
|
||||
//
|
||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
|
||||
// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
|
||||
// {@code {1}} = {@code {1}} => stop and predict 1, can announce
|
||||
// ambiguity {@code {1,2}}</li>
|
||||
//
|
||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
|
||||
// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
|
||||
// {@code {2}} = {@code {1,2}} => continue</li>
|
||||
//
|
||||
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
|
||||
// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
|
||||
// {@code {3}} = {@code {1,3}} => continue</li>
|
||||
//
|
||||
// </ul>
|
||||
//
|
||||
// <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
|
||||
//
|
||||
// <p>If all states Report the same conflicting set of alternatives, then we
|
||||
// know we have the exact ambiguity set.</p>
|
||||
//
|
||||
// <p><code>|A_<em>i</em>|>1</code> and
|
||||
// <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
|
||||
//
|
||||
// <p>In other words, we continue examining lookahead until all {@code A_i}
|
||||
// have more than one alternative and all {@code A_i} are the same. If
|
||||
// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
|
||||
// because the resolved set is {@code {1}}. To determine what the real
|
||||
// ambiguity is, we have to know whether the ambiguity is between one and
|
||||
// two or one and three so we keep going. We can only stop prediction when
|
||||
// we need exact ambiguity detection when the sets look like
|
||||
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
|
||||
// PredictionModeresolvesToJustOneViableAlt returns the single alternative
// that every subset in altsets resolves to (each subset's minimum), or
// ATNInvalidAltNumber when the subsets resolve to different minimums and
// more lookahead is therefore required. See the full-LL termination
// discussion above.
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
	return PredictionModegetSingleViableAlt(altsets)
}
|
||||
|
||||
// PredictionModeallSubsetsConflict determines if every alternative subset in
// altsets contains more than one alternative.
//
// It returns true if every BitSet in altsets has cardinality > 1, otherwise
// false. (Vacuously true for an empty altsets.)
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
	return !PredictionModehasNonConflictingAltSet(altsets)
}
|
||||
|
||||
// Determines if any single alternative subset in {@code altsets} contains
|
||||
// exactly one alternative.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
|
||||
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
|
||||
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
if alts.length() == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Determines if any single alternative subset in {@code altsets} contains
|
||||
// more than one alternative.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
|
||||
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
|
||||
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
if alts.length() > 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Determines if every alternative subset in {@code altsets} is equivalent.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return {@code true} if every member of {@code altsets} is equal to the
|
||||
// others, otherwise {@code false}
|
||||
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
|
||||
var first *BitSet
|
||||
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
if first == nil {
|
||||
first = alts
|
||||
} else if alts != first {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns the unique alternative predicted by all alternative subsets in
|
||||
// {@code altsets}. If no such alternative exists, this method returns
|
||||
// {@link ATN//INVALID_ALT_NUMBER}.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
|
||||
all := PredictionModeGetAlts(altsets)
|
||||
if all.length() == 1 {
|
||||
return all.minValue()
|
||||
}
|
||||
|
||||
return ATNInvalidAltNumber
|
||||
}
|
||||
|
||||
// Gets the complete set of represented alternatives for a collection of
|
||||
// alternative subsets. This method returns the union of each {@link BitSet}
|
||||
// in {@code altsets}.
|
||||
//
|
||||
// @param altsets a collection of alternative subsets
|
||||
// @return the set of represented alternatives in {@code altsets}
|
||||
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
|
||||
all := NewBitSet()
|
||||
for _, alts := range altsets {
|
||||
all.or(alts)
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets
// from a configuration set. Configurations are grouped by (state, context) —
// the JMap comparator deliberately ignores alt and semantic predicate — and
// each group collects the alternatives seen for that pair:
//
//	for each configuration c in configs:
//	    map[c] U= c.alt  // map hash/equals uses s and x, not alt and not pred
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
	configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)

	for _, c := range configs.GetItems() {
		// First configuration seen for this (state, context) pair starts a
		// fresh alternative subset.
		alts, ok := configToAlts.Get(c)
		if !ok {
			alts = NewBitSet()
			configToAlts.Put(c, alts)
		}
		alts.add(c.GetAlt())
	}

	return configToAlts.Values()
}
|
||||
|
||||
// PredictionModeGetStateToAltMap gets a map from state to alt subset from a
// configuration set. For each configuration c in configs:
//
//	map[c.state] U= c.alt
//
// States are keyed in the AltDict by their String() representation.
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
	m := NewAltDict()

	for _, c := range configs.GetItems() {
		alts := m.Get(c.GetState().String())
		if alts == nil {
			alts = NewBitSet()
			m.put(c.GetState().String(), alts)
		}
		// AltDict stores interface{} values, so assert back to *BitSet.
		alts.(*BitSet).add(c.GetAlt())
	}
	return m
}
|
||||
|
||||
func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
|
||||
values := PredictionModeGetStateToAltMap(configs).values()
|
||||
for i := 0; i < len(values); i++ {
|
||||
if values[i].(*BitSet).length() == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
|
||||
result := ATNInvalidAltNumber
|
||||
|
||||
for i := 0; i < len(altsets); i++ {
|
||||
alts := altsets[i]
|
||||
minAlt := alts.minValue()
|
||||
if result == ATNInvalidAltNumber {
|
||||
result = minAlt
|
||||
} else if result != minAlt { // more than 1 viable alt
|
||||
return ATNInvalidAltNumber
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
114
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
generated
vendored
114
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
generated
vendored
@@ -1,114 +0,0 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
// A rule context is a record of a single rule invocation. It knows
|
||||
// which context invoked it, if any. If there is no parent context, then
|
||||
// naturally the invoking state is not valid. The parent link
|
||||
// provides a chain upwards from the current rule invocation to the root
|
||||
// of the invocation tree, forming a stack. We actually carry no
|
||||
// information about the rule associated with b context (except
|
||||
// when parsing). We keep only the state number of the invoking state from
|
||||
// the ATN submachine that invoked b. Contrast b with the s
|
||||
// pointer inside ParserRuleContext that tracks the current state
|
||||
// being "executed" for the current rule.
|
||||
//
|
||||
// The parent contexts are useful for computing lookahead sets and
|
||||
// getting error information.
|
||||
//
|
||||
// These objects are used during parsing and prediction.
|
||||
// For the special case of parsers, we use the subclass
|
||||
// ParserRuleContext.
|
||||
//
|
||||
// @see ParserRuleContext
|
||||
//
|
||||
|
||||
// RuleContext is a record of a single rule invocation. It knows which
// context invoked it, if any; the chain of parent contexts forms a stack of
// rule invocations up to the root of the invocation tree.
type RuleContext interface {
	RuleNode

	// GetInvokingState returns the ATN state of the transition that invoked
	// this rule, or -1 when there is no parent context.
	GetInvokingState() int
	SetInvokingState(int)

	// GetRuleIndex returns the index of the rule this context represents.
	GetRuleIndex() int
	// IsEmpty reports whether this context has no invoking state, i.e. it is
	// the root of the invocation stack.
	IsEmpty() bool

	// GetAltNumber returns the outer alternative matched for this context,
	// or ATNInvalidAltNumber when alternatives are not tracked.
	GetAltNumber() int
	SetAltNumber(altNumber int)

	String([]string, RuleContext) string
}
|
||||
|
||||
// BaseRuleContext is the default RuleContext implementation, carrying the
// parent context, the invoking ATN state, and the rule index.
type BaseRuleContext struct {
	parentCtx     RuleContext // invoking context, or nil at the root
	invokingState int         // ATN state that invoked this rule; -1 at the root
	RuleIndex     int         // index of the rule this context represents
}
|
||||
|
||||
func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
|
||||
|
||||
rn := new(BaseRuleContext)
|
||||
|
||||
// What context invoked b rule?
|
||||
rn.parentCtx = parent
|
||||
|
||||
// What state invoked the rule associated with b context?
|
||||
// The "return address" is the followState of invokingState
|
||||
// If parent is nil, b should be -1.
|
||||
if parent == nil {
|
||||
rn.invokingState = -1
|
||||
} else {
|
||||
rn.invokingState = invokingState
|
||||
}
|
||||
|
||||
return rn
|
||||
}
|
||||
|
||||
// GetBaseRuleContext returns the receiver itself, giving embedding types
// access to the underlying BaseRuleContext.
func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
	return b
}
|
||||
|
||||
func (b *BaseRuleContext) SetParent(v Tree) {
|
||||
if v == nil {
|
||||
b.parentCtx = nil
|
||||
} else {
|
||||
b.parentCtx = v.(RuleContext)
|
||||
}
|
||||
}
|
||||
|
||||
// GetInvokingState returns the ATN state that invoked this rule, or -1 when
// this context has no parent.
func (b *BaseRuleContext) GetInvokingState() int {
	return b.invokingState
}
|
||||
|
||||
// SetInvokingState records t as the ATN state that invoked this rule.
func (b *BaseRuleContext) SetInvokingState(t int) {
	b.invokingState = t
}
|
||||
|
||||
// GetRuleIndex returns the index of the rule this context represents.
func (b *BaseRuleContext) GetRuleIndex() int {
	return b.RuleIndex
}
|
||||
|
||||
// GetAltNumber returns the outer alternative matched for this context. The
// base implementation does not track alternatives and always returns
// ATNInvalidAltNumber.
func (b *BaseRuleContext) GetAltNumber() int {
	return ATNInvalidAltNumber
}
|
||||
|
||||
// SetAltNumber is a no-op in the base implementation, which does not track
// the matched alternative.
func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
|
||||
|
||||
// IsEmpty reports whether this context has no invoking state, meaning
// nothing invoked the current context — it is the root of the invocation
// stack.
func (b *BaseRuleContext) IsEmpty() bool {
	return b.invokingState == -1
}
|
||||
|
||||
// GetParent returns the invoking rule context, or nil when this context is
// the root of the invocation stack.
//
// NOTE(review): the comment previously attached here ("Return the combined
// text of all child nodes ...") described a GetText method, not this
// accessor; it appears to have been misplaced and has been corrected.
func (b *BaseRuleContext) GetParent() Tree {
	return b.parentCtx
}
|
235
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
generated
vendored
235
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
generated
vendored
@@ -1,235 +0,0 @@
|
||||
package antlr
|
||||
|
||||
import "math"
|
||||
|
||||
const (
	// _initalCapacity is the initial number of buckets in the table. (The
	// "inital" spelling is a long-standing typo, preserved because the names
	// are visible throughout the package.)
	_initalCapacity = 16
	// _initalBucketCapacity is the initial slot count within each bucket.
	_initalBucketCapacity = 8
	// _loadFactor is the fill ratio that triggers a table expansion.
	_loadFactor = 0.75
)
|
||||
|
||||
// Set is a minimal set abstraction over interface{} values.
type Set interface {
	// Add inserts value and returns the element now in the set (the existing
	// equal element, if one was already present).
	Add(value interface{}) (added interface{})
	// Len returns the number of elements in the set.
	Len() int
	// Get returns the stored element equal to value, or nil if absent.
	Get(value interface{}) (found interface{})
	// Contains reports whether an element equal to value is present.
	Contains(value interface{}) bool
	// Values returns all elements of the set.
	Values() []interface{}
	// Each invokes f on every element until f returns false.
	Each(f func(interface{}) bool)
}
|
||||
|
||||
// array2DHashSet is a hash set backed by a two-level structure: a slice of
// buckets, each bucket a slice of elements, with caller-supplied hash and
// equality functions. Within a bucket the first nil slot marks the end of
// the occupied region.
type array2DHashSet struct {
	buckets          [][]Collectable[any]
	hashcodeFunction func(interface{}) int
	equalsFunction   func(Collectable[any], Collectable[any]) bool

	n         int // How many elements in set
	threshold int // when to expand

	currentPrime          int // jump by 4 primes each expand or whatever
	initialBucketCapacity int
}
|
||||
|
||||
func (as *array2DHashSet) Each(f func(interface{}) bool) {
|
||||
if as.Len() < 1 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, bucket := range as.buckets {
|
||||
for _, o := range bucket {
|
||||
if o == nil {
|
||||
break
|
||||
}
|
||||
if !f(o) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (as *array2DHashSet) Values() []interface{} {
|
||||
if as.Len() < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := make([]interface{}, 0, as.Len())
|
||||
as.Each(func(i interface{}) bool {
|
||||
values = append(values, i)
|
||||
return true
|
||||
})
|
||||
return values
|
||||
}
|
||||
|
||||
// Contains reports whether an element equal to value is present. A nil
// value is always reported as absent (Get returns nil for nil input).
func (as *array2DHashSet) Contains(value Collectable[any]) bool {
	return as.Get(value) != nil
}
|
||||
|
||||
// Add inserts value, expanding the table first if the element count has
// crossed the load threshold. It returns the element now in the set: the
// pre-existing equal element if there was one, otherwise value itself.
func (as *array2DHashSet) Add(value Collectable[any]) interface{} {
	if as.n > as.threshold {
		as.expand()
	}
	return as.innerAdd(value)
}
|
||||
|
||||
// expand doubles the bucket table and rehashes every element into it. The
// table size stays a power of two, which getBuckets relies on for its
// mask-based indexing.
func (as *array2DHashSet) expand() {
	old := as.buckets

	// NOTE(review): currentPrime is bumped here but not read anywhere in
	// this file — presumably a vestige of a prime-probing scheme; confirm
	// before removing.
	as.currentPrime += 4

	var (
		newCapacity      = len(as.buckets) << 1
		newTable         = as.createBuckets(newCapacity)
		newBucketLengths = make([]int, len(newTable))
	)

	// Install the new (empty) table before rehashing so getBuckets hashes
	// against the new length.
	as.buckets = newTable
	as.threshold = int(float64(newCapacity) * _loadFactor)

	// Rehash every element from the old table into the new one, tracking
	// per-bucket fill counts so elements append after the occupied region.
	for _, bucket := range old {
		if bucket == nil {
			continue
		}

		for _, o := range bucket {
			if o == nil {
				break // first nil slot ends the occupied region
			}

			b := as.getBuckets(o)
			bucketLength := newBucketLengths[b]
			var newBucket []Collectable[any]
			if bucketLength == 0 {
				// new bucket
				newBucket = as.createBucket(as.initialBucketCapacity)
				newTable[b] = newBucket
			} else {
				newBucket = newTable[b]
				if bucketLength == len(newBucket) {
					// expand: the destination bucket is full, so double it
					newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
					copy(newBucketCopy[:bucketLength], newBucket)
					newBucket = newBucketCopy
					newTable[b] = newBucket
				}
			}

			newBucket[bucketLength] = o
			newBucketLengths[b]++
		}
	}
}
|
||||
|
||||
// Len returns the number of elements currently stored in the set.
func (as *array2DHashSet) Len() int {
	return as.n
}
|
||||
|
||||
// Get returns the stored element equal to o (per equalsFunction), or nil
// when o is nil or no such element exists.
func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
	if o == nil {
		return nil
	}

	b := as.getBuckets(o)
	bucket := as.buckets[b]
	if bucket == nil { // no bucket
		return nil
	}

	for _, e := range bucket {
		if e == nil {
			return nil // empty slot; not there
		}
		if as.equalsFunction(e, o) {
			return e
		}
	}

	return nil
}
|
||||
|
||||
// innerAdd performs the insertion without checking the load threshold
// (Add does that). It returns the existing equal element if one is found,
// otherwise o after inserting it. Buckets are nil-terminated: the first nil
// slot ends the occupied region.
func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
	b := as.getBuckets(o)

	bucket := as.buckets[b]

	// new bucket
	if bucket == nil {
		bucket = as.createBucket(as.initialBucketCapacity)
		bucket[0] = o

		as.buckets[b] = bucket
		as.n++
		return o
	}

	// look for it in bucket
	for i := 0; i < len(bucket); i++ {
		existing := bucket[i]
		if existing == nil { // empty slot; not there, add.
			bucket[i] = o
			as.n++
			return o
		}

		if as.equalsFunction(existing, o) { // found existing, quit
			return existing
		}
	}

	// full bucket, expand and add to end
	oldLength := len(bucket)
	bucketCopy := make([]Collectable[any], oldLength<<1)
	copy(bucketCopy[:oldLength], bucket)
	bucket = bucketCopy
	as.buckets[b] = bucket
	bucket[oldLength] = o
	as.n++
	return o
}
|
||||
|
||||
// getBuckets maps value's hash to a bucket index. The table size is always a
// power of two, so masking with len-1 is equivalent to a modulo.
func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
	hash := as.hashcodeFunction(value)
	return hash & (len(as.buckets) - 1)
}
|
||||
|
||||
// createBuckets allocates a bucket table with the given capacity.
func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
	return make([][]Collectable[any], cap)
}
|
||||
|
||||
// createBucket allocates a single bucket with the given slot capacity.
func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
	return make([]Collectable[any], cap)
}
|
||||
|
||||
func newArray2DHashSetWithCap(
|
||||
hashcodeFunction func(interface{}) int,
|
||||
equalsFunction func(Collectable[any], Collectable[any]) bool,
|
||||
initCap int,
|
||||
initBucketCap int,
|
||||
) *array2DHashSet {
|
||||
if hashcodeFunction == nil {
|
||||
hashcodeFunction = standardHashFunction
|
||||
}
|
||||
|
||||
if equalsFunction == nil {
|
||||
equalsFunction = standardEqualsFunction
|
||||
}
|
||||
|
||||
ret := &array2DHashSet{
|
||||
hashcodeFunction: hashcodeFunction,
|
||||
equalsFunction: equalsFunction,
|
||||
|
||||
n: 0,
|
||||
threshold: int(math.Floor(_initalCapacity * _loadFactor)),
|
||||
|
||||
currentPrime: 1,
|
||||
initialBucketCapacity: initBucketCap,
|
||||
}
|
||||
|
||||
ret.buckets = ret.createBuckets(initCap)
|
||||
return ret
|
||||
}
|
||||
|
||||
func newArray2DHashSet(
|
||||
hashcodeFunction func(interface{}) int,
|
||||
equalsFunction func(Collectable[any], Collectable[any]) bool,
|
||||
) *array2DHashSet {
|
||||
return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
|
||||
}
|
18
vendor/github.com/antlr4-go/antlr/v4/.gitignore
generated
vendored
Normal file
18
vendor/github.com/antlr4-go/antlr/v4/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
### Go template
|
||||
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
|
||||
# Go workspace file
|
||||
go.work
|
||||
|
||||
# No Goland stuff in this repo
|
||||
.idea
|
28
vendor/github.com/antlr4-go/antlr/v4/LICENSE
generated
vendored
Normal file
28
vendor/github.com/antlr4-go/antlr/v4/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither name of copyright holders nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
|
||||
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
54
vendor/github.com/antlr4-go/antlr/v4/README.md
generated
vendored
Normal file
54
vendor/github.com/antlr4-go/antlr/v4/README.md
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
[](https://goreportcard.com/report/github.com/antlr4-go/antlr)
|
||||
[](https://pkg.go.dev/github.com/antlr4-go/antlr)
|
||||
[](https://github.com/antlr4-go/antlr/releases/latest)
|
||||
[](https://github.com/antlr4-go/antlr/releases/latest)
|
||||
[](https://github.com/antlr4-go/antlr/commit-activity)
|
||||
[](https://opensource.org/licenses/BSD-3-Clause)
|
||||
[](https://GitHub.com/Naereen/StrapDown.js/stargazers/)
|
||||
# ANTLR4 Go Runtime Module Repo
|
||||
|
||||
IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.
|
||||
|
||||
- Do not submit PRs or any change requests to this repo
|
||||
- This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
|
||||
- This repo contains the Go runtime that your generated projects should import
|
||||
|
||||
## Introduction
|
||||
|
||||
This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
|
||||
at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
|
||||
the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.
|
||||
|
||||
The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
|
||||
|
||||
### Why?
|
||||
|
||||
The `go get` command is unable to retrieve the Go runtime when it is embedded so
|
||||
deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
|
||||
does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and
|
||||
causes confusion.
|
||||
|
||||
For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as:
|
||||
|
||||
```sh
|
||||
require (
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
|
||||
)
|
||||
```
|
||||
|
||||
Where you would expect to see:
|
||||
|
||||
```sh
|
||||
require (
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
|
||||
)
|
||||
```
|
||||
|
||||
The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and
|
||||
from whence users can expect `go get` to behave as expected.
|
||||
|
||||
|
||||
# Documentation
|
||||
Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
|
||||
migrating existing projects to use the new module location and for information on how to use the Go runtime in
|
||||
general.
|
102
vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
generated
vendored
Normal file
102
vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
/*
|
||||
Package antlr implements the Go version of the ANTLR 4 runtime.
|
||||
|
||||
# The ANTLR Tool
|
||||
|
||||
ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
|
||||
or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
|
||||
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
|
||||
(or visitor) that makes it easy to respond to the recognition of phrases of interest.
|
||||
|
||||
# Go Runtime
|
||||
|
||||
At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
|
||||
source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
|
||||
ANTLR4 that it is compatible with (I.E. uses the /v4 path).
|
||||
|
||||
However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
|
||||
of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
|
||||
This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
|
||||
list the release tag such as @4.12.0 - this was confusing, to say the least.
|
||||
|
||||
As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
|
||||
(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
|
||||
which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
|
||||
|
||||
This means that if you are using the source code without modules, you should also use the source code in the [new repo].
|
||||
Though we highly recommend that you use go modules, as they are now idiomatic for Go.
|
||||
|
||||
I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
|
||||
|
||||
Go runtime author: [Jim Idle] jimi@idle.ws
|
||||
|
||||
# Code Generation
|
||||
|
||||
ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
|
||||
runtime library, written specifically to support the generated code in the target language. This library is the
|
||||
runtime for the Go target.
|
||||
|
||||
To generate code for the go target, it is generally recommended to place the source grammar files in a package of
|
||||
their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
|
||||
it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
|
||||
that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
|
||||
way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
|
||||
your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
|
||||
it was at any point in its history.
|
||||
|
||||
Here is a general/recommended template for an ANTLR based recognizer in Go:
|
||||
|
||||
.
|
||||
├── parser
|
||||
│ ├── mygrammar.g4
|
||||
│ ├── antlr-4.12.1-complete.jar
|
||||
│ ├── generate.go
|
||||
│ └── generate.sh
|
||||
├── parsing - generated code goes here
|
||||
│ └── error_listeners.go
|
||||
├── go.mod
|
||||
├── go.sum
|
||||
├── main.go
|
||||
└── main_test.go
|
||||
|
||||
Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
|
||||
|
||||
The generate.go file then looks like this:
|
||||
|
||||
package parser
|
||||
|
||||
//go:generate ./generate.sh
|
||||
|
||||
And the generate.sh file will look similar to this:
|
||||
|
||||
#!/bin/sh
|
||||
|
||||
alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
|
||||
antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
|
||||
|
||||
depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
is to generate the code into a package of its own and import it from your main package.
|
||||
|
||||
From the command line at the root of your source package (location of go.mod) you can then simply issue the command:
|
||||
|
||||
go generate ./...
|
||||
|
||||
Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code
|
||||
by importing the parsing package.
|
||||
|
||||
There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
|
||||
|
||||
# Copyright Notice
|
||||
|
||||
Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
|
||||
|
||||
Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
|
||||
|
||||
[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
|
||||
[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
|
||||
[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
|
||||
[new repo]: https://github.com/antlr4-go/antlr
|
||||
[Jim Idle]: https://github.com/jimidle
|
||||
[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
|
||||
*/
|
||||
package antlr
|
@@ -20,10 +20,11 @@ var ATNInvalidAltNumber int
|
||||
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
|
||||
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
|
||||
type ATN struct {
|
||||
// DecisionToState is the decision points for all rules, subrules, optional
|
||||
// blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
|
||||
|
||||
// DecisionToState is the decision points for all rules, sub-rules, optional
|
||||
// blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
|
||||
// can go back later and build DFA predictors for them. This includes
|
||||
// all the rules, subrules, optional blocks, ()+, ()* etc...
|
||||
// all the rules, sub-rules, optional blocks, ()+, ()* etc...
|
||||
DecisionToState []DecisionState
|
||||
|
||||
// grammarType is the ATN type and is used for deserializing ATNs from strings.
|
||||
@@ -51,6 +52,8 @@ type ATN struct {
|
||||
// specified, and otherwise is nil.
|
||||
ruleToTokenType []int
|
||||
|
||||
// ATNStates is a list of all states in the ATN, ordered by state number.
|
||||
//
|
||||
states []ATNState
|
||||
|
||||
mu sync.Mutex
|
335
vendor/github.com/antlr4-go/antlr/v4/atn_config.go
generated
vendored
Normal file
335
vendor/github.com/antlr4-go/antlr/v4/atn_config.go
generated
vendored
Normal file
@@ -0,0 +1,335 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// cType discriminator values: an ATNConfig hashes and compares slightly
// differently depending on whether it was produced by a lexer or a parser.
const (
	lexerConfig  = iota // this ATNConfig belongs to a lexer
	parserConfig        // this ATNConfig belongs to a parser
)
// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
|
||||
// context). The syntactic context is a graph-structured stack node whose
|
||||
// path(s) to the root is the rule invocation(s) chain used to arrive in the
|
||||
// state. The semantic context is the tree of semantic predicates encountered
|
||||
// before reaching an ATN state.
|
||||
type ATNConfig struct {
|
||||
precedenceFilterSuppressed bool
|
||||
state ATNState
|
||||
alt int
|
||||
context *PredictionContext
|
||||
semanticContext SemanticContext
|
||||
reachesIntoOuterContext int
|
||||
cType int // lexerConfig or parserConfig
|
||||
lexerActionExecutor *LexerActionExecutor
|
||||
passedThroughNonGreedyDecision bool
|
||||
}
|
||||
|
||||
// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
|
||||
func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
|
||||
return NewATNConfig5(state, alt, context, SemanticContextNone)
|
||||
}
|
||||
|
||||
// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
|
||||
func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
|
||||
if semanticContext == nil {
|
||||
panic("semanticContext cannot be nil") // TODO: Necessary?
|
||||
}
|
||||
|
||||
pac := &ATNConfig{}
|
||||
pac.state = state
|
||||
pac.alt = alt
|
||||
pac.context = context
|
||||
pac.semanticContext = semanticContext
|
||||
pac.cType = parserConfig
|
||||
return pac
|
||||
}
|
||||
|
||||
// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
|
||||
func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
|
||||
return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
|
||||
}
|
||||
|
||||
// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
|
||||
func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
|
||||
return NewATNConfig(c, state, c.GetContext(), semanticContext)
|
||||
}
|
||||
|
||||
// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only
|
||||
func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
|
||||
return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
|
||||
}
|
||||
|
||||
// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
|
||||
func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
|
||||
return NewATNConfig(c, state, context, c.GetSemanticContext())
|
||||
}
|
||||
|
||||
// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors'
|
||||
// are just wrappers around this one.
|
||||
func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
|
||||
if semanticContext == nil {
|
||||
panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
|
||||
}
|
||||
b := &ATNConfig{}
|
||||
b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
|
||||
b.cType = parserConfig
|
||||
return b
|
||||
}
|
||||
|
||||
func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
|
||||
|
||||
a.state = state
|
||||
a.alt = alt
|
||||
a.context = context
|
||||
a.semanticContext = semanticContext
|
||||
a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
|
||||
a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
|
||||
}
|
||||
|
||||
func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
|
||||
return a.precedenceFilterSuppressed
|
||||
}
|
||||
|
||||
func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
|
||||
a.precedenceFilterSuppressed = v
|
||||
}
|
||||
|
||||
// GetState returns the ATN state associated with this configuration
|
||||
func (a *ATNConfig) GetState() ATNState {
|
||||
return a.state
|
||||
}
|
||||
|
||||
// GetAlt returns the alternative associated with this configuration
|
||||
func (a *ATNConfig) GetAlt() int {
|
||||
return a.alt
|
||||
}
|
||||
|
||||
// SetContext sets the rule invocation stack associated with this configuration
|
||||
func (a *ATNConfig) SetContext(v *PredictionContext) {
|
||||
a.context = v
|
||||
}
|
||||
|
||||
// GetContext returns the rule invocation stack associated with this configuration
|
||||
func (a *ATNConfig) GetContext() *PredictionContext {
|
||||
return a.context
|
||||
}
|
||||
|
||||
// GetSemanticContext returns the semantic context associated with this configuration
|
||||
func (a *ATNConfig) GetSemanticContext() SemanticContext {
|
||||
return a.semanticContext
|
||||
}
|
||||
|
||||
// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
|
||||
func (a *ATNConfig) GetReachesIntoOuterContext() int {
|
||||
return a.reachesIntoOuterContext
|
||||
}
|
||||
|
||||
// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
|
||||
func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
|
||||
a.reachesIntoOuterContext = v
|
||||
}
|
||||
|
||||
// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
|
||||
// for a collection.
|
||||
//
|
||||
// An ATN configuration is equal to another if both have the same state, they
|
||||
// predict the same alternative, and syntactic/semantic contexts are the same.
|
||||
func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
|
||||
switch a.cType {
|
||||
case lexerConfig:
|
||||
return a.LEquals(o)
|
||||
case parserConfig:
|
||||
return a.PEquals(o)
|
||||
default:
|
||||
panic("Invalid ATNConfig type")
|
||||
}
|
||||
}
|
||||
|
||||
// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
|
||||
// for a collection.
|
||||
//
|
||||
// An ATN configuration is equal to another if both have the same state, they
|
||||
// predict the same alternative, and syntactic/semantic contexts are the same.
|
||||
func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
|
||||
var other, ok = o.(*ATNConfig)
|
||||
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if a == other {
|
||||
return true
|
||||
} else if other == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var equal bool
|
||||
|
||||
if a.context == nil {
|
||||
equal = other.context == nil
|
||||
} else {
|
||||
equal = a.context.Equals(other.context)
|
||||
}
|
||||
|
||||
var (
|
||||
nums = a.state.GetStateNumber() == other.state.GetStateNumber()
|
||||
alts = a.alt == other.alt
|
||||
cons = a.semanticContext.Equals(other.semanticContext)
|
||||
sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
|
||||
)
|
||||
|
||||
return nums && alts && cons && sups && equal
|
||||
}
|
||||
|
||||
// Hash is the default hash function for a parser ATNConfig, when no specialist hash function
|
||||
// is required for a collection
|
||||
func (a *ATNConfig) Hash() int {
|
||||
switch a.cType {
|
||||
case lexerConfig:
|
||||
return a.LHash()
|
||||
case parserConfig:
|
||||
return a.PHash()
|
||||
default:
|
||||
panic("Invalid ATNConfig type")
|
||||
}
|
||||
}
|
||||
|
||||
// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
|
||||
// is required for a collection
|
||||
func (a *ATNConfig) PHash() int {
|
||||
var c int
|
||||
if a.context != nil {
|
||||
c = a.context.Hash()
|
||||
}
|
||||
|
||||
h := murmurInit(7)
|
||||
h = murmurUpdate(h, a.state.GetStateNumber())
|
||||
h = murmurUpdate(h, a.alt)
|
||||
h = murmurUpdate(h, c)
|
||||
h = murmurUpdate(h, a.semanticContext.Hash())
|
||||
return murmurFinish(h, 4)
|
||||
}
|
||||
|
||||
// String returns a string representation of the ATNConfig, usually used for debugging purposes
|
||||
func (a *ATNConfig) String() string {
|
||||
var s1, s2, s3 string
|
||||
|
||||
if a.context != nil {
|
||||
s1 = ",[" + fmt.Sprint(a.context) + "]"
|
||||
}
|
||||
|
||||
if a.semanticContext != SemanticContextNone {
|
||||
s2 = "," + fmt.Sprint(a.semanticContext)
|
||||
}
|
||||
|
||||
if a.reachesIntoOuterContext > 0 {
|
||||
s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
|
||||
}
|
||||
|
||||
func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
|
||||
lac := &ATNConfig{}
|
||||
lac.state = state
|
||||
lac.alt = alt
|
||||
lac.context = context
|
||||
lac.semanticContext = SemanticContextNone
|
||||
lac.cType = lexerConfig
|
||||
return lac
|
||||
}
|
||||
|
||||
func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
|
||||
lac := &ATNConfig{}
|
||||
lac.lexerActionExecutor = c.lexerActionExecutor
|
||||
lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
|
||||
lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
|
||||
lac.cType = lexerConfig
|
||||
return lac
|
||||
}
|
||||
|
||||
func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
|
||||
lac := &ATNConfig{}
|
||||
lac.lexerActionExecutor = lexerActionExecutor
|
||||
lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
|
||||
lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
|
||||
lac.cType = lexerConfig
|
||||
return lac
|
||||
}
|
||||
|
||||
func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
|
||||
lac := &ATNConfig{}
|
||||
lac.lexerActionExecutor = c.lexerActionExecutor
|
||||
lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
|
||||
lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
|
||||
lac.cType = lexerConfig
|
||||
return lac
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
|
||||
lac := &ATNConfig{}
|
||||
lac.state = state
|
||||
lac.alt = alt
|
||||
lac.context = context
|
||||
lac.semanticContext = SemanticContextNone
|
||||
lac.cType = lexerConfig
|
||||
return lac
|
||||
}
|
||||
|
||||
// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
|
||||
// the default comparator [ObjEqComparator].
|
||||
func (a *ATNConfig) LHash() int {
|
||||
var f int
|
||||
if a.passedThroughNonGreedyDecision {
|
||||
f = 1
|
||||
} else {
|
||||
f = 0
|
||||
}
|
||||
h := murmurInit(7)
|
||||
h = murmurUpdate(h, a.state.GetStateNumber())
|
||||
h = murmurUpdate(h, a.alt)
|
||||
h = murmurUpdate(h, a.context.Hash())
|
||||
h = murmurUpdate(h, a.semanticContext.Hash())
|
||||
h = murmurUpdate(h, f)
|
||||
h = murmurUpdate(h, a.lexerActionExecutor.Hash())
|
||||
h = murmurFinish(h, 6)
|
||||
return h
|
||||
}
|
||||
|
||||
// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
|
||||
// the default comparator [ObjEqComparator].
|
||||
func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
|
||||
var otherT, ok = other.(*ATNConfig)
|
||||
if !ok {
|
||||
return false
|
||||
} else if a == otherT {
|
||||
return true
|
||||
} else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
|
||||
return false
|
||||
}
|
||||
|
||||
switch {
|
||||
case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
|
||||
return true
|
||||
case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
|
||||
if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false // One but not both, are nil
|
||||
}
|
||||
|
||||
return a.PEquals(otherT)
|
||||
}
|
||||
|
||||
func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
|
||||
var ds, ok = target.(DecisionState)
|
||||
|
||||
return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
|
||||
}
|
301
vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
generated
vendored
Normal file
301
vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
generated
vendored
Normal file
@@ -0,0 +1,301 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ATNConfigSet is a specialized set of ATNConfig that tracks information
|
||||
// about its elements and can combine similar configurations using a
|
||||
// graph-structured stack.
|
||||
type ATNConfigSet struct {
|
||||
cachedHash int
|
||||
|
||||
// configLookup is used to determine whether two ATNConfigSets are equal. We
|
||||
// need all configurations with the same (s, i, _, semctx) to be equal. A key
|
||||
// effectively doubles the number of objects associated with ATNConfigs. All
|
||||
// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
|
||||
// read-only because a set becomes a DFA state.
|
||||
configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
|
||||
|
||||
// configs is the added elements that did not match an existing key in configLookup
|
||||
configs []*ATNConfig
|
||||
|
||||
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
|
||||
// info together because it saves re-computation. Can we track conflicts as they
|
||||
// are added to save scanning configs later?
|
||||
conflictingAlts *BitSet
|
||||
|
||||
// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
|
||||
// we hit a pred while computing a closure operation. Do not make a DFA state
|
||||
// from the ATNConfigSet in this case. TODO: How is this used by parsers?
|
||||
dipsIntoOuterContext bool
|
||||
|
||||
// fullCtx is whether it is part of a full context LL prediction. Used to
|
||||
// determine how to merge $. It is a wildcard with SLL, but not for an LL
|
||||
// context merge.
|
||||
fullCtx bool
|
||||
|
||||
// Used in parser and lexer. In lexer, it indicates we hit a pred
|
||||
// while computing a closure operation. Don't make a DFA state from this set.
|
||||
hasSemanticContext bool
|
||||
|
||||
// readOnly is whether it is read-only. Do not
|
||||
// allow any code to manipulate the set if true because DFA states will point at
|
||||
// sets and those must not change. It not, protect other fields; conflictingAlts
|
||||
// in particular, which is assigned after readOnly.
|
||||
readOnly bool
|
||||
|
||||
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
|
||||
// info together because it saves re-computation. Can we track conflicts as they
|
||||
// are added to save scanning configs later?
|
||||
uniqueAlt int
|
||||
}
|
||||
|
||||
// Alts returns the combined set of alts for all the configurations in this set.
|
||||
func (b *ATNConfigSet) Alts() *BitSet {
|
||||
alts := NewBitSet()
|
||||
for _, it := range b.configs {
|
||||
alts.add(it.GetAlt())
|
||||
}
|
||||
return alts
|
||||
}
|
||||
|
||||
// NewATNConfigSet creates a new ATNConfigSet instance.
|
||||
func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
|
||||
return &ATNConfigSet{
|
||||
cachedHash: -1,
|
||||
configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
|
||||
fullCtx: fullCtx,
|
||||
}
|
||||
}
|
||||
|
||||
// Add merges contexts with existing configs for (s, i, pi, _),
|
||||
// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
|
||||
// 'pi' is the [ATNConfig].semanticContext.
|
||||
//
|
||||
// We use (s,i,pi) as the key.
|
||||
// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
|
||||
func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
|
||||
if b.readOnly {
|
||||
panic("set is read-only")
|
||||
}
|
||||
|
||||
if config.GetSemanticContext() != SemanticContextNone {
|
||||
b.hasSemanticContext = true
|
||||
}
|
||||
|
||||
if config.GetReachesIntoOuterContext() > 0 {
|
||||
b.dipsIntoOuterContext = true
|
||||
}
|
||||
|
||||
existing, present := b.configLookup.Put(config)
|
||||
|
||||
// The config was not already in the set
|
||||
//
|
||||
if !present {
|
||||
b.cachedHash = -1
|
||||
b.configs = append(b.configs, config) // Track order here
|
||||
return true
|
||||
}
|
||||
|
||||
// Merge a previous (s, i, pi, _) with it and save the result
|
||||
rootIsWildcard := !b.fullCtx
|
||||
merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
|
||||
|
||||
// No need to check for existing.context because config.context is in the cache,
|
||||
// since the only way to create new graphs is the "call rule" and here. We cache
|
||||
// at both places.
|
||||
existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
|
||||
|
||||
// Preserve the precedence filter suppression during the merge
|
||||
if config.getPrecedenceFilterSuppressed() {
|
||||
existing.setPrecedenceFilterSuppressed(true)
|
||||
}
|
||||
|
||||
// Replace the context because there is no need to do alt mapping
|
||||
existing.SetContext(merged)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// GetStates returns the set of states represented by all configurations in this config set
|
||||
func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
|
||||
|
||||
// states uses the standard comparator and Hash() provided by the ATNState instance
|
||||
//
|
||||
states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
|
||||
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
states.Put(b.configs[i].GetState())
|
||||
}
|
||||
|
||||
return states
|
||||
}
|
||||
|
||||
func (b *ATNConfigSet) GetPredicates() []SemanticContext {
|
||||
predicates := make([]SemanticContext, 0)
|
||||
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
c := b.configs[i].GetSemanticContext()
|
||||
|
||||
if c != SemanticContextNone {
|
||||
predicates = append(predicates, c)
|
||||
}
|
||||
}
|
||||
|
||||
return predicates
|
||||
}
|
||||
|
||||
func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
|
||||
if b.readOnly {
|
||||
panic("set is read-only")
|
||||
}
|
||||
|
||||
// Empty indicate no optimization is possible
|
||||
if b.configLookup == nil || b.configLookup.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
config := b.configs[i]
|
||||
config.SetContext(interpreter.getCachedContext(config.GetContext()))
|
||||
}
|
||||
}
|
||||
|
||||
func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
|
||||
for i := 0; i < len(coll); i++ {
|
||||
b.Add(coll[i], nil)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Compare The configs are only equal if they are in the same order and their Equals function returns true.
|
||||
// Java uses ArrayList.equals(), which requires the same order.
|
||||
func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
|
||||
if len(b.configs) != len(bs.configs) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
if !b.configs[i].Equals(bs.configs[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
|
||||
if b == other {
|
||||
return true
|
||||
} else if _, ok := other.(*ATNConfigSet); !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
other2 := other.(*ATNConfigSet)
|
||||
var eca bool
|
||||
switch {
|
||||
case b.conflictingAlts == nil && other2.conflictingAlts == nil:
|
||||
eca = true
|
||||
case b.conflictingAlts != nil && other2.conflictingAlts != nil:
|
||||
eca = b.conflictingAlts.equals(other2.conflictingAlts)
|
||||
}
|
||||
return b.configs != nil &&
|
||||
b.fullCtx == other2.fullCtx &&
|
||||
b.uniqueAlt == other2.uniqueAlt &&
|
||||
eca &&
|
||||
b.hasSemanticContext == other2.hasSemanticContext &&
|
||||
b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
|
||||
b.Compare(other2)
|
||||
}
|
||||
|
||||
func (b *ATNConfigSet) Hash() int {
|
||||
if b.readOnly {
|
||||
if b.cachedHash == -1 {
|
||||
b.cachedHash = b.hashCodeConfigs()
|
||||
}
|
||||
|
||||
return b.cachedHash
|
||||
}
|
||||
|
||||
return b.hashCodeConfigs()
|
||||
}
|
||||
|
||||
func (b *ATNConfigSet) hashCodeConfigs() int {
|
||||
h := 1
|
||||
for _, config := range b.configs {
|
||||
h = 31*h + config.Hash()
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
|
||||
if b.readOnly {
|
||||
panic("not implemented for read-only sets")
|
||||
}
|
||||
if b.configLookup == nil {
|
||||
return false
|
||||
}
|
||||
return b.configLookup.Contains(item)
|
||||
}
|
||||
|
||||
// ContainsFast delegates to Contains; this Go runtime keeps a single lookup
// table, so there is no faster path to take. Panics on read-only sets, as
// Contains does.
func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
	return b.Contains(item)
}
|
||||
|
||||
func (b *ATNConfigSet) Clear() {
|
||||
if b.readOnly {
|
||||
panic("set is read-only")
|
||||
}
|
||||
b.configs = make([]*ATNConfig, 0)
|
||||
b.cachedHash = -1
|
||||
b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
|
||||
}
|
||||
|
||||
func (b *ATNConfigSet) String() string {
|
||||
|
||||
s := "["
|
||||
|
||||
for i, c := range b.configs {
|
||||
s += c.String()
|
||||
|
||||
if i != len(b.configs)-1 {
|
||||
s += ", "
|
||||
}
|
||||
}
|
||||
|
||||
s += "]"
|
||||
|
||||
if b.hasSemanticContext {
|
||||
s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
|
||||
}
|
||||
|
||||
if b.uniqueAlt != ATNInvalidAltNumber {
|
||||
s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
|
||||
}
|
||||
|
||||
if b.conflictingAlts != nil {
|
||||
s += ",conflictingAlts=" + b.conflictingAlts.String()
|
||||
}
|
||||
|
||||
if b.dipsIntoOuterContext {
|
||||
s += ",dipsIntoOuterContext"
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
|
||||
// for use in lexers.
|
||||
func NewOrderedATNConfigSet() *ATNConfigSet {
|
||||
return &ATNConfigSet{
|
||||
cachedHash: -1,
|
||||
// This set uses the standard Hash() and Equals() from ATNConfig
|
||||
configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
|
||||
fullCtx: false,
|
||||
}
|
||||
}
|
@@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool {
|
||||
|
||||
func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
|
||||
if opts.readOnly {
|
||||
panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
|
||||
panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
|
||||
}
|
||||
opts.readOnly = readOnly
|
||||
}
|
||||
@@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool {
|
||||
|
||||
func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
|
||||
if opts.readOnly {
|
||||
panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
|
||||
panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
|
||||
}
|
||||
opts.verifyATN = verifyATN
|
||||
}
|
||||
@@ -42,11 +42,12 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
|
||||
|
||||
func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
|
||||
if opts.readOnly {
|
||||
panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
|
||||
panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
|
||||
}
|
||||
opts.generateRuleBypassTransitions = generateRuleBypassTransitions
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
|
||||
return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
|
||||
}
|
@@ -35,6 +35,7 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
|
||||
return &ATNDeserializer{options: options}
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedFunction
|
||||
func stringInSlice(a string, list []string) int {
|
||||
for i, b := range list {
|
||||
if b == a {
|
||||
@@ -193,7 +194,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
|
||||
func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
|
||||
m := a.readInt()
|
||||
|
||||
// Preallocate the needed capacity.
|
||||
@@ -350,7 +351,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
|
||||
|
||||
bypassStart.endState = bypassStop
|
||||
|
||||
atn.defineDecisionState(bypassStart.BaseDecisionState)
|
||||
atn.defineDecisionState(&bypassStart.BaseDecisionState)
|
||||
|
||||
bypassStop.startState = bypassStart
|
||||
|
||||
@@ -450,7 +451,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
|
||||
continue
|
||||
}
|
||||
|
||||
// We analyze the ATN to determine if a ATN decision state is the
|
||||
// We analyze the [ATN] to determine if an ATN decision state is the
|
||||
// decision for the closure block that determines whether a
|
||||
// precedence rule should continue or complete.
|
||||
if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
|
||||
@@ -553,7 +554,7 @@ func (a *ATNDeserializer) readInt() int {
|
||||
return int(v) // data is 32 bits but int is at least that big
|
||||
}
|
||||
|
||||
func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
|
||||
func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
|
||||
target := atn.states[trg]
|
||||
|
||||
switch typeIndex {
|
@@ -4,7 +4,7 @@
|
||||
|
||||
package antlr
|
||||
|
||||
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
|
||||
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
|
||||
|
||||
type IATNSimulator interface {
|
||||
SharedContextCache() *PredictionContextCache
|
||||
@@ -18,22 +18,13 @@ type BaseATNSimulator struct {
|
||||
decisionToDFA []*DFA
|
||||
}
|
||||
|
||||
func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
|
||||
b := new(BaseATNSimulator)
|
||||
|
||||
b.atn = atn
|
||||
b.sharedContextCache = sharedContextCache
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
|
||||
func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
|
||||
if b.sharedContextCache == nil {
|
||||
return context
|
||||
}
|
||||
|
||||
visited := make(map[PredictionContext]PredictionContext)
|
||||
|
||||
//visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
|
||||
visited := NewVisitRecord()
|
||||
return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
|
||||
}
|
||||
|
@@ -4,7 +4,11 @@
|
||||
|
||||
package antlr
|
||||
|
||||
import "strconv"
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Constants for serialization.
|
||||
const (
|
||||
@@ -25,6 +29,7 @@ const (
|
||||
ATNStateInvalidStateNumber = -1
|
||||
)
|
||||
|
||||
//goland:noinspection GoUnusedGlobalVariable
|
||||
var ATNStateInitialNumTransitions = 4
|
||||
|
||||
type ATNState interface {
|
||||
@@ -73,7 +78,7 @@ type BaseATNState struct {
|
||||
transitions []Transition
|
||||
}
|
||||
|
||||
func NewBaseATNState() *BaseATNState {
|
||||
func NewATNState() *BaseATNState {
|
||||
return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
|
||||
}
|
||||
|
||||
@@ -148,27 +153,46 @@ func (as *BaseATNState) AddTransition(trans Transition, index int) {
|
||||
if len(as.transitions) == 0 {
|
||||
as.epsilonOnlyTransitions = trans.getIsEpsilon()
|
||||
} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
|
||||
_, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
|
||||
as.epsilonOnlyTransitions = false
|
||||
}
|
||||
|
||||
// TODO: Check code for already present compared to the Java equivalent
|
||||
//alreadyPresent := false
|
||||
//for _, t := range as.transitions {
|
||||
// if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
|
||||
// if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
|
||||
// alreadyPresent = true
|
||||
// break
|
||||
// }
|
||||
// } else if t.getIsEpsilon() && trans.getIsEpsilon() {
|
||||
// alreadyPresent = true
|
||||
// break
|
||||
// }
|
||||
//}
|
||||
//if !alreadyPresent {
|
||||
if index == -1 {
|
||||
as.transitions = append(as.transitions, trans)
|
||||
} else {
|
||||
as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
|
||||
// TODO: as.transitions.splice(index, 1, trans)
|
||||
}
|
||||
//} else {
|
||||
// _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
|
||||
//}
|
||||
}
|
||||
|
||||
type BasicState struct {
|
||||
*BaseATNState
|
||||
BaseATNState
|
||||
}
|
||||
|
||||
func NewBasicState() *BasicState {
|
||||
b := NewBaseATNState()
|
||||
|
||||
b.stateType = ATNStateBasic
|
||||
|
||||
return &BasicState{BaseATNState: b}
|
||||
return &BasicState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateBasic,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type DecisionState interface {
|
||||
@@ -182,13 +206,19 @@ type DecisionState interface {
|
||||
}
|
||||
|
||||
type BaseDecisionState struct {
|
||||
*BaseATNState
|
||||
BaseATNState
|
||||
decision int
|
||||
nonGreedy bool
|
||||
}
|
||||
|
||||
func NewBaseDecisionState() *BaseDecisionState {
|
||||
return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
|
||||
return &BaseDecisionState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateBasic,
|
||||
},
|
||||
decision: -1,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BaseDecisionState) getDecision() int {
|
||||
@@ -216,12 +246,20 @@ type BlockStartState interface {
|
||||
|
||||
// BaseBlockStartState is the start of a regular (...) block.
|
||||
type BaseBlockStartState struct {
|
||||
*BaseDecisionState
|
||||
BaseDecisionState
|
||||
endState *BlockEndState
|
||||
}
|
||||
|
||||
func NewBlockStartState() *BaseBlockStartState {
|
||||
return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
|
||||
return &BaseBlockStartState{
|
||||
BaseDecisionState: BaseDecisionState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateBasic,
|
||||
},
|
||||
decision: -1,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BaseBlockStartState) getEndState() *BlockEndState {
|
||||
@@ -233,31 +271,38 @@ func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
|
||||
}
|
||||
|
||||
type BasicBlockStartState struct {
|
||||
*BaseBlockStartState
|
||||
BaseBlockStartState
|
||||
}
|
||||
|
||||
func NewBasicBlockStartState() *BasicBlockStartState {
|
||||
b := NewBlockStartState()
|
||||
|
||||
b.stateType = ATNStateBlockStart
|
||||
|
||||
return &BasicBlockStartState{BaseBlockStartState: b}
|
||||
return &BasicBlockStartState{
|
||||
BaseBlockStartState: BaseBlockStartState{
|
||||
BaseDecisionState: BaseDecisionState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateBlockStart,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var _ BlockStartState = &BasicBlockStartState{}
|
||||
|
||||
// BlockEndState is a terminal node of a simple (a|b|c) block.
|
||||
type BlockEndState struct {
|
||||
*BaseATNState
|
||||
BaseATNState
|
||||
startState ATNState
|
||||
}
|
||||
|
||||
func NewBlockEndState() *BlockEndState {
|
||||
b := NewBaseATNState()
|
||||
|
||||
b.stateType = ATNStateBlockEnd
|
||||
|
||||
return &BlockEndState{BaseATNState: b}
|
||||
return &BlockEndState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateBlockEnd,
|
||||
},
|
||||
startState: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// RuleStopState is the last node in the ATN for a rule, unless that rule is the
|
||||
@@ -265,43 +310,48 @@ func NewBlockEndState() *BlockEndState {
|
||||
// encode references to all calls to this rule to compute FOLLOW sets for error
|
||||
// handling.
|
||||
type RuleStopState struct {
|
||||
*BaseATNState
|
||||
BaseATNState
|
||||
}
|
||||
|
||||
func NewRuleStopState() *RuleStopState {
|
||||
b := NewBaseATNState()
|
||||
|
||||
b.stateType = ATNStateRuleStop
|
||||
|
||||
return &RuleStopState{BaseATNState: b}
|
||||
return &RuleStopState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateRuleStop,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type RuleStartState struct {
|
||||
*BaseATNState
|
||||
BaseATNState
|
||||
stopState ATNState
|
||||
isPrecedenceRule bool
|
||||
}
|
||||
|
||||
func NewRuleStartState() *RuleStartState {
|
||||
b := NewBaseATNState()
|
||||
|
||||
b.stateType = ATNStateRuleStart
|
||||
|
||||
return &RuleStartState{BaseATNState: b}
|
||||
return &RuleStartState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateRuleStart,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
|
||||
// transitions: one to the loop back to start of the block, and one to exit.
|
||||
type PlusLoopbackState struct {
|
||||
*BaseDecisionState
|
||||
BaseDecisionState
|
||||
}
|
||||
|
||||
func NewPlusLoopbackState() *PlusLoopbackState {
|
||||
b := NewBaseDecisionState()
|
||||
|
||||
b.stateType = ATNStatePlusLoopBack
|
||||
|
||||
return &PlusLoopbackState{BaseDecisionState: b}
|
||||
return &PlusLoopbackState{
|
||||
BaseDecisionState: BaseDecisionState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStatePlusLoopBack,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
|
||||
@@ -309,85 +359,103 @@ func NewPlusLoopbackState() *PlusLoopbackState {
|
||||
// it is included for completeness. In reality, PlusLoopbackState is the real
|
||||
// decision-making node for A+.
|
||||
type PlusBlockStartState struct {
|
||||
*BaseBlockStartState
|
||||
BaseBlockStartState
|
||||
loopBackState ATNState
|
||||
}
|
||||
|
||||
func NewPlusBlockStartState() *PlusBlockStartState {
|
||||
b := NewBlockStartState()
|
||||
|
||||
b.stateType = ATNStatePlusBlockStart
|
||||
|
||||
return &PlusBlockStartState{BaseBlockStartState: b}
|
||||
return &PlusBlockStartState{
|
||||
BaseBlockStartState: BaseBlockStartState{
|
||||
BaseDecisionState: BaseDecisionState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStatePlusBlockStart,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var _ BlockStartState = &PlusBlockStartState{}
|
||||
|
||||
// StarBlockStartState is the block that begins a closure loop.
|
||||
type StarBlockStartState struct {
|
||||
*BaseBlockStartState
|
||||
BaseBlockStartState
|
||||
}
|
||||
|
||||
func NewStarBlockStartState() *StarBlockStartState {
|
||||
b := NewBlockStartState()
|
||||
|
||||
b.stateType = ATNStateStarBlockStart
|
||||
|
||||
return &StarBlockStartState{BaseBlockStartState: b}
|
||||
return &StarBlockStartState{
|
||||
BaseBlockStartState: BaseBlockStartState{
|
||||
BaseDecisionState: BaseDecisionState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateStarBlockStart,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var _ BlockStartState = &StarBlockStartState{}
|
||||
|
||||
type StarLoopbackState struct {
|
||||
*BaseATNState
|
||||
BaseATNState
|
||||
}
|
||||
|
||||
func NewStarLoopbackState() *StarLoopbackState {
|
||||
b := NewBaseATNState()
|
||||
|
||||
b.stateType = ATNStateStarLoopBack
|
||||
|
||||
return &StarLoopbackState{BaseATNState: b}
|
||||
return &StarLoopbackState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateStarLoopBack,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type StarLoopEntryState struct {
|
||||
*BaseDecisionState
|
||||
BaseDecisionState
|
||||
loopBackState ATNState
|
||||
precedenceRuleDecision bool
|
||||
}
|
||||
|
||||
func NewStarLoopEntryState() *StarLoopEntryState {
|
||||
b := NewBaseDecisionState()
|
||||
|
||||
b.stateType = ATNStateStarLoopEntry
|
||||
|
||||
// False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
|
||||
return &StarLoopEntryState{BaseDecisionState: b}
|
||||
return &StarLoopEntryState{
|
||||
BaseDecisionState: BaseDecisionState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateStarLoopEntry,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// LoopEndState marks the end of a * or + loop.
|
||||
type LoopEndState struct {
|
||||
*BaseATNState
|
||||
BaseATNState
|
||||
loopBackState ATNState
|
||||
}
|
||||
|
||||
func NewLoopEndState() *LoopEndState {
|
||||
b := NewBaseATNState()
|
||||
|
||||
b.stateType = ATNStateLoopEnd
|
||||
|
||||
return &LoopEndState{BaseATNState: b}
|
||||
return &LoopEndState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateLoopEnd,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
|
||||
type TokensStartState struct {
|
||||
*BaseDecisionState
|
||||
BaseDecisionState
|
||||
}
|
||||
|
||||
func NewTokensStartState() *TokensStartState {
|
||||
b := NewBaseDecisionState()
|
||||
|
||||
b.stateType = ATNStateTokenStart
|
||||
|
||||
return &TokensStartState{BaseDecisionState: b}
|
||||
return &TokensStartState{
|
||||
BaseDecisionState: BaseDecisionState{
|
||||
BaseATNState: BaseATNState{
|
||||
stateNumber: ATNStateInvalidStateNumber,
|
||||
stateType: ATNStateTokenStart,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
@@ -8,5 +8,5 @@ type CharStream interface {
|
||||
IntStream
|
||||
GetText(int, int) string
|
||||
GetTextFromTokens(start, end Token) string
|
||||
GetTextFromInterval(*Interval) string
|
||||
GetTextFromInterval(Interval) string
|
||||
}
|
@@ -28,22 +28,24 @@ type CommonTokenStream struct {
|
||||
// trivial with bt field.
|
||||
fetchedEOF bool
|
||||
|
||||
// index indexs into tokens of the current token (next token to consume).
|
||||
// index into [tokens] of the current token (next token to consume).
|
||||
// tokens[p] should be LT(1). It is set to -1 when the stream is first
|
||||
// constructed or when SetTokenSource is called, indicating that the first token
|
||||
// has not yet been fetched from the token source. For additional information,
|
||||
// see the documentation of IntStream for a description of initializing methods.
|
||||
// see the documentation of [IntStream] for a description of initializing methods.
|
||||
index int
|
||||
|
||||
// tokenSource is the TokenSource from which tokens for the bt stream are
|
||||
// tokenSource is the [TokenSource] from which tokens for the bt stream are
|
||||
// fetched.
|
||||
tokenSource TokenSource
|
||||
|
||||
// tokens is all tokens fetched from the token source. The list is considered a
|
||||
// tokens contains all tokens fetched from the token source. The list is considered a
|
||||
// complete view of the input once fetchedEOF is set to true.
|
||||
tokens []Token
|
||||
}
|
||||
|
||||
// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
|
||||
// tokens and will pull tokens from the given lexer channel.
|
||||
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
|
||||
return &CommonTokenStream{
|
||||
channel: channel,
|
||||
@@ -53,6 +55,7 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllTokens returns all tokens currently pulled from the token source.
|
||||
func (c *CommonTokenStream) GetAllTokens() []Token {
|
||||
return c.tokens
|
||||
}
|
||||
@@ -61,9 +64,11 @@ func (c *CommonTokenStream) Mark() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *CommonTokenStream) Release(marker int) {}
|
||||
func (c *CommonTokenStream) Release(_ int) {}
|
||||
|
||||
func (c *CommonTokenStream) reset() {
|
||||
func (c *CommonTokenStream) Reset() {
|
||||
c.fetchedEOF = false
|
||||
c.tokens = make([]Token, 0)
|
||||
c.Seek(0)
|
||||
}
|
||||
|
||||
@@ -107,7 +112,7 @@ func (c *CommonTokenStream) Consume() {
|
||||
// Sync makes sure index i in tokens has a token and returns true if a token is
|
||||
// located at index i and otherwise false.
|
||||
func (c *CommonTokenStream) Sync(i int) bool {
|
||||
n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
|
||||
n := i - len(c.tokens) + 1 // How many more elements do we need?
|
||||
|
||||
if n > 0 {
|
||||
fetched := c.fetch(n)
|
||||
@@ -193,12 +198,13 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
|
||||
c.tokenSource = tokenSource
|
||||
c.tokens = make([]Token, 0)
|
||||
c.index = -1
|
||||
c.fetchedEOF = false
|
||||
}
|
||||
|
||||
// NextTokenOnChannel returns the index of the next token on channel given a
|
||||
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
|
||||
// no tokens on channel between i and EOF.
|
||||
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
|
||||
// no tokens on channel between 'i' and [TokenEOF].
|
||||
func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
|
||||
c.Sync(i)
|
||||
|
||||
if i >= len(c.tokens) {
|
||||
@@ -244,7 +250,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To
|
||||
nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
|
||||
from := tokenIndex + 1
|
||||
|
||||
// If no onchannel to the right, then nextOnChannel == -1, so set to to last token
|
||||
// If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
|
||||
var to int
|
||||
|
||||
if nextOnChannel == -1 {
|
||||
@@ -314,7 +320,8 @@ func (c *CommonTokenStream) Index() int {
|
||||
}
|
||||
|
||||
func (c *CommonTokenStream) GetAllText() string {
|
||||
return c.GetTextFromInterval(nil)
|
||||
c.Fill()
|
||||
return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
|
||||
}
|
||||
|
||||
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
|
||||
@@ -329,15 +336,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
|
||||
return c.GetTextFromInterval(interval.GetSourceInterval())
|
||||
}
|
||||
|
||||
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
|
||||
func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
|
||||
c.lazyInit()
|
||||
|
||||
if interval == nil {
|
||||
c.Fill()
|
||||
interval = NewInterval(0, len(c.tokens)-1)
|
||||
} else {
|
||||
c.Sync(interval.Stop)
|
||||
}
|
||||
|
||||
start := interval.Start
|
||||
stop := interval.Stop
|
@@ -18,17 +18,20 @@ package antlr
|
||||
// type safety and avoid having to implement this for every type that we want to perform comparison on.
|
||||
//
|
||||
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
|
||||
// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
|
||||
// allows us to use it in any collection instance that does not require a special hash or equals implementation.
|
||||
type ObjEqComparator[T Collectable[T]] struct{}
|
||||
|
||||
var (
|
||||
aStateEqInst = &ObjEqComparator[ATNState]{}
|
||||
aConfEqInst = &ObjEqComparator[ATNConfig]{}
|
||||
aConfCompInst = &ATNConfigComparator[ATNConfig]{}
|
||||
atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
|
||||
aConfEqInst = &ObjEqComparator[*ATNConfig]{}
|
||||
|
||||
// aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
|
||||
aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
|
||||
atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
|
||||
dfaStateEqInst = &ObjEqComparator[*DFAState]{}
|
||||
semctxEqInst = &ObjEqComparator[SemanticContext]{}
|
||||
atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
|
||||
atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
|
||||
pContextEqInst = &ObjEqComparator[*PredictionContext]{}
|
||||
)
|
||||
|
||||
// Equals2 delegates to the Equals() method of type T
|
||||
@@ -44,14 +47,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int {
|
||||
|
||||
type SemCComparator[T Collectable[T]] struct{}
|
||||
|
||||
// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
|
||||
// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
|
||||
// and has a custom Equals() and Hash() implementation, because equality is not based on the
|
||||
// standard Hash() and Equals() methods of the ATNConfig type.
|
||||
type ATNConfigComparator[T Collectable[T]] struct {
|
||||
}
|
||||
|
||||
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
|
||||
func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
|
||||
func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
|
||||
|
||||
// Same pointer, must be equal, even if both nil
|
||||
//
|
||||
@@ -72,7 +75,8 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
|
||||
}
|
||||
|
||||
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
|
||||
func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
|
||||
func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
|
||||
|
||||
hash := 7
|
||||
hash = 31*hash + o.GetState().GetStateNumber()
|
||||
hash = 31*hash + o.GetAlt()
|
||||
@@ -85,7 +89,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct {
|
||||
}
|
||||
|
||||
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
|
||||
func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
|
||||
func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
|
||||
|
||||
// Same pointer, must be equal, even if both nil
|
||||
//
|
||||
@@ -105,21 +109,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
|
||||
}
|
||||
|
||||
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
|
||||
func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
|
||||
func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
|
||||
h := murmurInit(7)
|
||||
h = murmurUpdate(h, o.GetState().GetStateNumber())
|
||||
h = murmurUpdate(h, o.GetContext().Hash())
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
|
||||
// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet
|
||||
// and has a custom Equals() and Hash() implementation, because equality is not based on the
|
||||
// standard Hash() and Equals() methods of the ATNConfig type.
|
||||
type BaseATNConfigComparator[T Collectable[T]] struct {
|
||||
}
|
||||
|
||||
// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
|
||||
func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
|
||||
func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
|
||||
|
||||
// Same pointer, must be equal, even if both nil
|
||||
//
|
||||
@@ -141,7 +145,6 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
|
||||
|
||||
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
|
||||
// delegates to the standard Hash() method of the ATNConfig type.
|
||||
func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
|
||||
|
||||
func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
|
||||
return o.Hash()
|
||||
}
|
214
vendor/github.com/antlr4-go/antlr/v4/configuration.go
generated
vendored
Normal file
214
vendor/github.com/antlr4-go/antlr/v4/configuration.go
generated
vendored
Normal file
@@ -0,0 +1,214 @@
|
||||
package antlr
|
||||
|
||||
type runtimeConfiguration struct {
|
||||
statsTraceStacks bool
|
||||
lexerATNSimulatorDebug bool
|
||||
lexerATNSimulatorDFADebug bool
|
||||
parserATNSimulatorDebug bool
|
||||
parserATNSimulatorTraceATNSim bool
|
||||
parserATNSimulatorDFADebug bool
|
||||
parserATNSimulatorRetryDebug bool
|
||||
lRLoopEntryBranchOpt bool
|
||||
memoryManager bool
|
||||
}
|
||||
|
||||
// Global runtime configuration
|
||||
var runtimeConfig = runtimeConfiguration{
|
||||
lRLoopEntryBranchOpt: true,
|
||||
}
|
||||
|
||||
type runtimeOption func(*runtimeConfiguration) error
|
||||
|
||||
// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options.
|
||||
// It uses the functional options pattern for go. This is a package global function as it operates on the runtime
|
||||
// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
|
||||
// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
|
||||
// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
|
||||
// memory allocation type used by the runtime such as sync.Pool or not.
|
||||
//
|
||||
// The options are applied in the order they are passed in, so the last option will override any previous options.
|
||||
//
|
||||
// For example, if you want to turn on the collection create point stack flag to true, you can do:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
|
||||
//
|
||||
// If you want to turn it off, you can do:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
|
||||
func ConfigureRuntime(options ...runtimeOption) error {
|
||||
for _, option := range options {
|
||||
err := option(&runtimeConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
|
||||
// certain structs, such as collections, or the use point of certain methods such as Put().
|
||||
// Because this can be expensive, it is turned off by default. However, it
|
||||
// can be useful to track down exactly where memory is being created and used.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
|
||||
//
|
||||
// You can turn it off at any time using:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
|
||||
func WithStatsTraceStacks(trace bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.statsTraceStacks = trace
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
|
||||
// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
|
||||
// to the runtime maintainers.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
|
||||
//
|
||||
// You can turn it off at any time using:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
|
||||
func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.lexerATNSimulatorDebug = debug
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
|
||||
// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
|
||||
// to the runtime maintainers.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
|
||||
//
|
||||
// You can turn it off at any time using:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
|
||||
func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.lexerATNSimulatorDFADebug = debug
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
|
||||
// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
|
||||
// to the runtime maintainers.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
|
||||
//
|
||||
// You can turn it off at any time using:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
|
||||
func WithParserATNSimulatorDebug(debug bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.parserATNSimulatorDebug = debug
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
|
||||
// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
|
||||
// to the runtime maintainers.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
|
||||
//
|
||||
// You can turn it off at any time using:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
|
||||
func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.parserATNSimulatorTraceATNSim = trace
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
|
||||
// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
|
||||
// to the runtime maintainers.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
|
||||
//
|
||||
// You can turn it off at any time using:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
|
||||
func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.parserATNSimulatorDFADebug = debug
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
|
||||
// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
|
||||
// Only useful to the runtime maintainers.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
|
||||
//
|
||||
// You can turn it off at any time using:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
|
||||
func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.parserATNSimulatorRetryDebug = debug
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLRLoopEntryBranchOpt sets the global flag indicating whether let recursive loop operations should be
|
||||
// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
|
||||
// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
|
||||
//
|
||||
// Note that default is to use this optimization.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
|
||||
//
|
||||
// You can turn it off at any time using:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
|
||||
func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.lRLoopEntryBranchOpt = off
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
|
||||
// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
|
||||
// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
|
||||
// for fixing your grammar by ridding yourself of extreme ambiguity. BUt if you are just trying to reuse an opensource
|
||||
// grammar, this may help make it more practical.
|
||||
//
|
||||
// Note that default is to use normal Go memory allocation and not pool memory.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
|
||||
//
|
||||
// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
|
||||
// and should remember to nil out any references to the parser or lexer when you are done with them.
|
||||
func WithMemoryManager(use bool) runtimeOption {
|
||||
return func(config *runtimeConfiguration) error {
|
||||
config.memoryManager = use
|
||||
return nil
|
||||
}
|
||||
}
|
@@ -4,6 +4,8 @@
|
||||
|
||||
package antlr
|
||||
|
||||
// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
|
||||
// reach and the transitions between them.
|
||||
type DFA struct {
|
||||
// atnStartState is the ATN state in which this was created
|
||||
atnStartState DecisionState
|
||||
@@ -12,10 +14,9 @@ type DFA struct {
|
||||
|
||||
// states is all the DFA states. Use Map to get the old state back; Set can only
|
||||
// indicate whether it is there. Go maps implement key hash collisions and so on and are very
|
||||
// good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
|
||||
// good, but the DFAState is an object and can't be used directly as the key as it can in say Java
|
||||
// amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
|
||||
// to see if they really are the same object.
|
||||
//
|
||||
// to see if they really are the same object. Hence, we have our own map storage.
|
||||
//
|
||||
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
|
||||
|
||||
@@ -32,11 +33,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
|
||||
dfa := &DFA{
|
||||
atnStartState: atnStartState,
|
||||
decision: decision,
|
||||
states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
|
||||
states: nil, // Lazy initialize
|
||||
}
|
||||
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
|
||||
dfa.precedenceDfa = true
|
||||
dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
|
||||
dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
|
||||
dfa.s0.isAcceptState = false
|
||||
dfa.s0.requiresFullContext = false
|
||||
}
|
||||
@@ -95,12 +96,11 @@ func (d *DFA) getPrecedenceDfa() bool {
|
||||
// true or nil otherwise, and d.precedenceDfa is updated.
|
||||
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
|
||||
if d.getPrecedenceDfa() != precedenceDfa {
|
||||
d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
|
||||
d.states = nil // Lazy initialize
|
||||
d.numstates = 0
|
||||
|
||||
if precedenceDfa {
|
||||
precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
|
||||
|
||||
precedenceState := NewDFAState(-1, NewATNConfigSet(false))
|
||||
precedenceState.setEdges(make([]*DFAState, 0))
|
||||
precedenceState.isAcceptState = false
|
||||
precedenceState.requiresFullContext = false
|
||||
@@ -113,6 +113,31 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
|
||||
// instantiation of the states JMap.
|
||||
func (d *DFA) Len() int {
|
||||
if d.states == nil {
|
||||
return 0
|
||||
}
|
||||
return d.states.Len()
|
||||
}
|
||||
|
||||
// Get returns a state that matches s if it is present in the DFA state set. We defer to this
|
||||
// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap.
|
||||
func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
|
||||
if d.states == nil {
|
||||
return nil, false
|
||||
}
|
||||
return d.states.Get(s)
|
||||
}
|
||||
|
||||
func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
|
||||
if d.states == nil {
|
||||
d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
|
||||
}
|
||||
return d.states.Put(s)
|
||||
}
|
||||
|
||||
func (d *DFA) getS0() *DFAState {
|
||||
return d.s0
|
||||
}
|
||||
@@ -121,9 +146,11 @@ func (d *DFA) setS0(s *DFAState) {
|
||||
d.s0 = s
|
||||
}
|
||||
|
||||
// sortedStates returns the states in d sorted by their state number.
|
||||
// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
|
||||
func (d *DFA) sortedStates() []*DFAState {
|
||||
|
||||
if d.states == nil {
|
||||
return []*DFAState{}
|
||||
}
|
||||
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
|
||||
return i.stateNumber < j.stateNumber
|
||||
})
|
@@ -10,7 +10,7 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DFASerializer is a DFA walker that knows how to dump them to serialized
|
||||
// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
|
||||
// strings.
|
||||
type DFASerializer struct {
|
||||
dfa *DFA
|
@@ -22,30 +22,31 @@ func (p *PredPrediction) String() string {
|
||||
return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
|
||||
}
|
||||
|
||||
// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
|
||||
// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
|
||||
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
|
||||
// states the ATN can be in after reading each input symbol. That is to say,
|
||||
// after reading input a1a2..an, the DFA is in a state that represents the
|
||||
// after reading input a1, a2,..an, the DFA is in a state that represents the
|
||||
// subset T of the states of the ATN that are reachable from the ATN's start
|
||||
// state along some path labeled a1a2..an." In conventional NFA-to-DFA
|
||||
// conversion, therefore, the subset T would be a bitset representing the set of
|
||||
// states the ATN could be in. We need to track the alt predicted by each state
|
||||
// state along some path labeled a1a2..an."
|
||||
//
|
||||
// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
|
||||
// states the [ATN] could be in. We need to track the alt predicted by each state
|
||||
// as well, however. More importantly, we need to maintain a stack of states,
|
||||
// tracking the closure operations as they jump from rule to rule, emulating
|
||||
// rule invocations (method calls). I have to add a stack to simulate the proper
|
||||
// lookahead sequences for the underlying LL grammar from which the ATN was
|
||||
// derived.
|
||||
//
|
||||
// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
|
||||
// state (ala normal conversion) and a RuleContext describing the chain of rules
|
||||
// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
|
||||
// state (ala normal conversion) and a [RuleContext] describing the chain of rules
|
||||
// (if any) followed to arrive at that state.
|
||||
//
|
||||
// A DFAState may have multiple references to a particular state, but with
|
||||
// different ATN contexts (with same or different alts) meaning that state was
|
||||
// A [DFAState] may have multiple references to a particular state, but with
|
||||
// different [ATN] contexts (with same or different alts) meaning that state was
|
||||
// reached via a different set of rule invocations.
|
||||
type DFAState struct {
|
||||
stateNumber int
|
||||
configs ATNConfigSet
|
||||
configs *ATNConfigSet
|
||||
|
||||
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
|
||||
// Token.EOF maps to the first element.
|
||||
@@ -53,7 +54,7 @@ type DFAState struct {
|
||||
|
||||
isAcceptState bool
|
||||
|
||||
// prediction is the ttype we match or alt we predict if the state is accept.
|
||||
// prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
|
||||
// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
|
||||
// requiresFullContext.
|
||||
prediction int
|
||||
@@ -81,9 +82,9 @@ type DFAState struct {
|
||||
predicates []*PredPrediction
|
||||
}
|
||||
|
||||
func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
|
||||
func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
|
||||
if configs == nil {
|
||||
configs = NewBaseATNConfigSet(false)
|
||||
configs = NewATNConfigSet(false)
|
||||
}
|
||||
|
||||
return &DFAState{configs: configs, stateNumber: stateNumber}
|
||||
@@ -94,7 +95,7 @@ func (d *DFAState) GetAltSet() []int {
|
||||
var alts []int
|
||||
|
||||
if d.configs != nil {
|
||||
for _, c := range d.configs.GetItems() {
|
||||
for _, c := range d.configs.configs {
|
||||
alts = append(alts, c.GetAlt())
|
||||
}
|
||||
}
|
@@ -33,6 +33,7 @@ type DiagnosticErrorListener struct {
|
||||
exactOnly bool
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
|
||||
|
||||
n := new(DiagnosticErrorListener)
|
||||
@@ -42,7 +43,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
|
||||
return n
|
||||
}
|
||||
|
||||
func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
|
||||
func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
|
||||
if d.exactOnly && !exact {
|
||||
return
|
||||
}
|
||||
@@ -55,7 +56,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s
|
||||
recognizer.NotifyErrorListeners(msg, nil, nil)
|
||||
}
|
||||
|
||||
func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
|
||||
func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
|
||||
|
||||
msg := "reportAttemptingFullContext d=" +
|
||||
d.getDecisionDescription(recognizer, dfa) +
|
||||
@@ -64,7 +65,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser,
|
||||
recognizer.NotifyErrorListeners(msg, nil, nil)
|
||||
}
|
||||
|
||||
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
|
||||
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
|
||||
msg := "reportContextSensitivity d=" +
|
||||
d.getDecisionDescription(recognizer, dfa) +
|
||||
", input='" +
|
||||
@@ -96,12 +97,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
|
||||
// @param configs The conflicting or ambiguous configuration set.
|
||||
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
|
||||
// returns the set of alternatives represented in {@code configs}.
|
||||
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
|
||||
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
|
||||
if ReportedAlts != nil {
|
||||
return ReportedAlts
|
||||
}
|
||||
result := NewBitSet()
|
||||
for _, c := range set.GetItems() {
|
||||
for _, c := range set.configs {
|
||||
result.add(c.GetAlt())
|
||||
}
|
||||
|
@@ -16,28 +16,29 @@ import (
|
||||
|
||||
type ErrorListener interface {
|
||||
SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
|
||||
ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
|
||||
ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
|
||||
ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
|
||||
ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
|
||||
ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
|
||||
ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
|
||||
}
|
||||
|
||||
type DefaultErrorListener struct {
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func NewDefaultErrorListener() *DefaultErrorListener {
|
||||
return new(DefaultErrorListener)
|
||||
}
|
||||
|
||||
func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
|
||||
func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
|
||||
}
|
||||
|
||||
func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
|
||||
func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
|
||||
}
|
||||
|
||||
func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
|
||||
func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
|
||||
}
|
||||
|
||||
func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
|
||||
func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
|
||||
}
|
||||
|
||||
type ConsoleErrorListener struct {
|
||||
@@ -48,21 +49,16 @@ func NewConsoleErrorListener() *ConsoleErrorListener {
|
||||
return new(ConsoleErrorListener)
|
||||
}
|
||||
|
||||
// Provides a default instance of {@link ConsoleErrorListener}.
|
||||
// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}.
|
||||
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
|
||||
|
||||
// {@inheritDoc}
|
||||
// SyntaxError prints messages to System.err containing the
|
||||
// values of line, charPositionInLine, and msg using
|
||||
// the following format:
|
||||
//
|
||||
// <p>
|
||||
// This implementation prints messages to {@link System//err} containing the
|
||||
// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
|
||||
// the following format.</p>
|
||||
//
|
||||
// <pre>
|
||||
// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
|
||||
// </pre>
|
||||
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
|
||||
fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
|
||||
// line <line>:<charPositionInLine> <msg>
|
||||
func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
|
||||
_, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
|
||||
}
|
||||
|
||||
type ProxyErrorListener struct {
|
||||
@@ -85,19 +81,19 @@ func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
|
||||
func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
|
||||
for _, d := range p.delegates {
|
||||
d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
|
||||
func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
|
||||
for _, d := range p.delegates {
|
||||
d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
|
||||
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
|
||||
for _, d := range p.delegates {
|
||||
d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
|
||||
}
|
@@ -21,8 +21,8 @@ type ErrorStrategy interface {
|
||||
ReportMatch(Parser)
|
||||
}
|
||||
|
||||
// This is the default implementation of {@link ANTLRErrorStrategy} used for
|
||||
// error Reporting and recovery in ANTLR parsers.
|
||||
// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
|
||||
// error reporting and recovery in ANTLR parsers.
|
||||
type DefaultErrorStrategy struct {
|
||||
errorRecoveryMode bool
|
||||
lastErrorIndex int
|
||||
@@ -46,7 +46,7 @@ func NewDefaultErrorStrategy() *DefaultErrorStrategy {
|
||||
// The index into the input stream where the last error occurred.
|
||||
// This is used to prevent infinite loops where an error is found
|
||||
// but no token is consumed during recovery...another error is found,
|
||||
// ad nauseum. This is a failsafe mechanism to guarantee that at least
|
||||
// ad nauseam. This is a failsafe mechanism to guarantee that at least
|
||||
// one token/tree node is consumed for two errors.
|
||||
//
|
||||
d.lastErrorIndex = -1
|
||||
@@ -62,50 +62,37 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) {
|
||||
|
||||
// This method is called to enter error recovery mode when a recognition
|
||||
// exception is Reported.
|
||||
//
|
||||
// @param recognizer the parser instance
|
||||
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
|
||||
func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
|
||||
d.errorRecoveryMode = true
|
||||
}
|
||||
|
||||
func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
|
||||
func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
|
||||
return d.errorRecoveryMode
|
||||
}
|
||||
|
||||
// This method is called to leave error recovery mode after recovering from
|
||||
// a recognition exception.
|
||||
//
|
||||
// @param recognizer
|
||||
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
|
||||
func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
|
||||
d.errorRecoveryMode = false
|
||||
d.lastErrorStates = nil
|
||||
d.lastErrorIndex = -1
|
||||
}
|
||||
|
||||
// {@inheritDoc}
|
||||
//
|
||||
// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
|
||||
// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
|
||||
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
|
||||
d.endErrorCondition(recognizer)
|
||||
}
|
||||
|
||||
// {@inheritDoc}
|
||||
// ReportError is the default implementation of error reporting.
|
||||
// It returns immediately if the handler is already
|
||||
// in error recovery mode. Otherwise, it calls [beginErrorCondition]
|
||||
// and dispatches the Reporting task based on the runtime type of e
|
||||
// according to the following table.
|
||||
//
|
||||
// <p>The default implementation returns immediately if the handler is already
|
||||
// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
|
||||
// and dispatches the Reporting task based on the runtime type of {@code e}
|
||||
// according to the following table.</p>
|
||||
//
|
||||
// <ul>
|
||||
// <li>{@link NoViableAltException}: Dispatches the call to
|
||||
// {@link //ReportNoViableAlternative}</li>
|
||||
// <li>{@link InputMisMatchException}: Dispatches the call to
|
||||
// {@link //ReportInputMisMatch}</li>
|
||||
// <li>{@link FailedPredicateException}: Dispatches the call to
|
||||
// {@link //ReportFailedPredicate}</li>
|
||||
// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
|
||||
// the exception</li>
|
||||
// </ul>
|
||||
// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
|
||||
// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
|
||||
// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
|
||||
// All other types : Calls [NotifyErrorListeners] to Report the exception
|
||||
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
|
||||
// if we've already Reported an error and have not Matched a token
|
||||
// yet successfully, don't Report any errors.
|
||||
@@ -128,12 +115,10 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep
|
||||
}
|
||||
}
|
||||
|
||||
// {@inheritDoc}
|
||||
//
|
||||
// <p>The default implementation reSynchronizes the parser by consuming tokens
|
||||
// until we find one in the reSynchronization set--loosely the set of tokens
|
||||
// that can follow the current rule.</p>
|
||||
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
|
||||
// Recover is the default recovery implementation.
|
||||
// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
|
||||
// loosely the set of tokens that can follow the current rule.
|
||||
func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
|
||||
|
||||
if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
|
||||
d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
|
||||
@@ -148,54 +133,58 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException
|
||||
d.lastErrorStates = NewIntervalSet()
|
||||
}
|
||||
d.lastErrorStates.addOne(recognizer.GetState())
|
||||
followSet := d.getErrorRecoverySet(recognizer)
|
||||
followSet := d.GetErrorRecoverySet(recognizer)
|
||||
d.consumeUntil(recognizer, followSet)
|
||||
}
|
||||
|
||||
// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
|
||||
// that the current lookahead symbol is consistent with what were expecting
|
||||
// at d point in the ATN. You can call d anytime but ANTLR only
|
||||
// generates code to check before subrules/loops and each iteration.
|
||||
// Sync is the default implementation of error strategy synchronization.
|
||||
//
|
||||
// <p>Implements Jim Idle's magic Sync mechanism in closures and optional
|
||||
// subrules. E.g.,</p>
|
||||
// This Sync makes sure that the current lookahead symbol is consistent with what were expecting
|
||||
// at this point in the [ATN]. You can call this anytime but ANTLR only
|
||||
// generates code to check before sub-rules/loops and each iteration.
|
||||
//
|
||||
// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
|
||||
// sub-rules. E.g.:
|
||||
//
|
||||
// <pre>
|
||||
// a : Sync ( stuff Sync )*
|
||||
// Sync : {consume to what can follow Sync}
|
||||
// </pre>
|
||||
//
|
||||
// At the start of a sub rule upon error, {@link //Sync} performs single
|
||||
// At the start of a sub-rule upon error, Sync performs single
|
||||
// token deletion, if possible. If it can't do that, it bails on the current
|
||||
// rule and uses the default error recovery, which consumes until the
|
||||
// reSynchronization set of the current rule.
|
||||
//
|
||||
// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
|
||||
// with an empty alternative), then the expected set includes what follows
|
||||
// the subrule.</p>
|
||||
// If the sub-rule is optional
|
||||
//
|
||||
// <p>During loop iteration, it consumes until it sees a token that can start a
|
||||
// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
|
||||
// stay in the loop as long as possible.</p>
|
||||
// ({@code (...)?}, {@code (...)*},
|
||||
//
|
||||
// <p><strong>ORIGINS</strong></p>
|
||||
// or a block with an empty alternative), then the expected set includes what follows
|
||||
// the sub-rule.
|
||||
//
|
||||
// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
|
||||
// During loop iteration, it consumes until it sees a token that can start a
|
||||
// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to
|
||||
// stay in the loop as long as possible.
|
||||
//
|
||||
// # Origins
|
||||
//
|
||||
// Previous versions of ANTLR did a poor job of their recovery within loops.
|
||||
// A single mismatch token or missing token would force the parser to bail
|
||||
// out of the entire rules surrounding the loop. So, for rule</p>
|
||||
// out of the entire rules surrounding the loop. So, for rule:
|
||||
//
|
||||
// <pre>
|
||||
// classfunc : 'class' ID '{' member* '}'
|
||||
// </pre>
|
||||
//
|
||||
// input with an extra token between members would force the parser to
|
||||
// consume until it found the next class definition rather than the next
|
||||
// member definition of the current class.
|
||||
//
|
||||
// <p>This functionality cost a little bit of effort because the parser has to
|
||||
// compare token set at the start of the loop and at each iteration. If for
|
||||
// some reason speed is suffering for you, you can turn off d
|
||||
// functionality by simply overriding d method as a blank { }.</p>
|
||||
// This functionality cost a bit of effort because the parser has to
|
||||
// compare the token set at the start of the loop and at each iteration. If for
|
||||
// some reason speed is suffering for you, you can turn off this
|
||||
// functionality by simply overriding this method as empty:
|
||||
//
|
||||
// { }
|
||||
//
|
||||
// [Jim Idle]: https://github.com/jimidle
|
||||
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
|
||||
// If already recovering, don't try to Sync
|
||||
if d.InErrorRecoveryMode(recognizer) {
|
||||
@@ -217,25 +206,21 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
|
||||
if d.SingleTokenDeletion(recognizer) != nil {
|
||||
return
|
||||
}
|
||||
panic(NewInputMisMatchException(recognizer))
|
||||
recognizer.SetError(NewInputMisMatchException(recognizer))
|
||||
case ATNStatePlusLoopBack, ATNStateStarLoopBack:
|
||||
d.ReportUnwantedToken(recognizer)
|
||||
expecting := NewIntervalSet()
|
||||
expecting.addSet(recognizer.GetExpectedTokens())
|
||||
whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
|
||||
whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
|
||||
d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
|
||||
default:
|
||||
// do nothing if we can't identify the exact kind of ATN state
|
||||
}
|
||||
}
|
||||
|
||||
// This is called by {@link //ReportError} when the exception is a
|
||||
// {@link NoViableAltException}.
|
||||
// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
|
||||
//
|
||||
// @see //ReportError
|
||||
//
|
||||
// @param recognizer the parser instance
|
||||
// @param e the recognition exception
|
||||
// See also [ReportError]
|
||||
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
|
||||
tokens := recognizer.GetTokenStream()
|
||||
var input string
|
||||
@@ -252,48 +237,38 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
|
||||
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
|
||||
}
|
||||
|
||||
// This is called by {@link //ReportError} when the exception is an
|
||||
// {@link InputMisMatchException}.
|
||||
// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
|
||||
//
|
||||
// @see //ReportError
|
||||
//
|
||||
// @param recognizer the parser instance
|
||||
// @param e the recognition exception
|
||||
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
|
||||
msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
|
||||
// See also: [ReportError]
|
||||
func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
|
||||
msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
|
||||
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
|
||||
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
|
||||
}
|
||||
|
||||
// This is called by {@link //ReportError} when the exception is a
|
||||
// {@link FailedPredicateException}.
|
||||
// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
|
||||
//
|
||||
// @see //ReportError
|
||||
//
|
||||
// @param recognizer the parser instance
|
||||
// @param e the recognition exception
|
||||
// See also: [ReportError]
|
||||
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
|
||||
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
|
||||
msg := "rule " + ruleName + " " + e.message
|
||||
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
|
||||
}
|
||||
|
||||
// This method is called to Report a syntax error which requires the removal
|
||||
// ReportUnwantedToken is called to report a syntax error that requires the removal
|
||||
// of a token from the input stream. At the time d method is called, the
|
||||
// erroneous symbol is current {@code LT(1)} symbol and has not yet been
|
||||
// removed from the input stream. When d method returns,
|
||||
// {@code recognizer} is in error recovery mode.
|
||||
// erroneous symbol is the current LT(1) symbol and has not yet been
|
||||
// removed from the input stream. When this method returns,
|
||||
// recognizer is in error recovery mode.
|
||||
//
|
||||
// <p>This method is called when {@link //singleTokenDeletion} identifies
|
||||
// This method is called when singleTokenDeletion identifies
|
||||
// single-token deletion as a viable recovery strategy for a mismatched
|
||||
// input error.</p>
|
||||
// input error.
|
||||
//
|
||||
// <p>The default implementation simply returns if the handler is already in
|
||||
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
|
||||
// The default implementation simply returns if the handler is already in
|
||||
// error recovery mode. Otherwise, it calls beginErrorCondition to
|
||||
// enter error recovery mode, followed by calling
|
||||
// {@link Parser//NotifyErrorListeners}.</p>
|
||||
//
|
||||
// @param recognizer the parser instance
|
||||
// [NotifyErrorListeners]
|
||||
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
|
||||
if d.InErrorRecoveryMode(recognizer) {
|
||||
return
|
||||
@@ -307,21 +282,18 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
|
||||
recognizer.NotifyErrorListeners(msg, t, nil)
|
||||
}
|
||||
|
||||
// This method is called to Report a syntax error which requires the
|
||||
// insertion of a missing token into the input stream. At the time d
|
||||
// method is called, the missing token has not yet been inserted. When d
|
||||
// method returns, {@code recognizer} is in error recovery mode.
|
||||
// ReportMissingToken is called to report a syntax error which requires the
|
||||
// insertion of a missing token into the input stream. At the time this
|
||||
// method is called, the missing token has not yet been inserted. When this
|
||||
// method returns, recognizer is in error recovery mode.
|
||||
//
|
||||
// <p>This method is called when {@link //singleTokenInsertion} identifies
|
||||
// This method is called when singleTokenInsertion identifies
|
||||
// single-token insertion as a viable recovery strategy for a mismatched
|
||||
// input error.</p>
|
||||
// input error.
|
||||
//
|
||||
// <p>The default implementation simply returns if the handler is already in
|
||||
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
|
||||
// enter error recovery mode, followed by calling
|
||||
// {@link Parser//NotifyErrorListeners}.</p>
|
||||
//
|
||||
// @param recognizer the parser instance
|
||||
// The default implementation simply returns if the handler is already in
|
||||
// error recovery mode. Otherwise, it calls beginErrorCondition to
|
||||
// enter error recovery mode, followed by calling [NotifyErrorListeners]
|
||||
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
|
||||
if d.InErrorRecoveryMode(recognizer) {
|
||||
return
|
||||
@@ -334,54 +306,48 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
|
||||
recognizer.NotifyErrorListeners(msg, t, nil)
|
||||
}
|
||||
|
||||
// <p>The default implementation attempts to recover from the mismatched input
|
||||
// The RecoverInline default implementation attempts to recover from the mismatched input
|
||||
// by using single token insertion and deletion as described below. If the
|
||||
// recovery attempt fails, d method panics an
|
||||
// {@link InputMisMatchException}.</p>
|
||||
// recovery attempt fails, this method panics with [InputMisMatchException}.
|
||||
// TODO: Not sure that panic() is the right thing to do here - JI
|
||||
//
|
||||
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
|
||||
// # EXTRA TOKEN (single token deletion)
|
||||
//
|
||||
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
|
||||
// right token, however, then assume {@code LA(1)} is some extra spurious
|
||||
// LA(1) is not what we are looking for. If LA(2) has the
|
||||
// right token, however, then assume LA(1) is some extra spurious
|
||||
// token and delete it. Then consume and return the next token (which was
|
||||
// the {@code LA(2)} token) as the successful result of the Match operation.</p>
|
||||
// the LA(2) token) as the successful result of the Match operation.
|
||||
//
|
||||
// <p>This recovery strategy is implemented by {@link
|
||||
// //singleTokenDeletion}.</p>
|
||||
// # This recovery strategy is implemented by singleTokenDeletion
|
||||
//
|
||||
// <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
|
||||
// # MISSING TOKEN (single token insertion)
|
||||
//
|
||||
// <p>If current token (at {@code LA(1)}) is consistent with what could come
|
||||
// after the expected {@code LA(1)} token, then assume the token is missing
|
||||
// and use the parser's {@link TokenFactory} to create it on the fly. The
|
||||
// "insertion" is performed by returning the created token as the successful
|
||||
// result of the Match operation.</p>
|
||||
// If current token -at LA(1) - is consistent with what could come
|
||||
// after the expected LA(1) token, then assume the token is missing
|
||||
// and use the parser's [TokenFactory] to create it on the fly. The
|
||||
// “insertion” is performed by returning the created token as the successful
|
||||
// result of the Match operation.
|
||||
//
|
||||
// <p>This recovery strategy is implemented by {@link
|
||||
// //singleTokenInsertion}.</p>
|
||||
// This recovery strategy is implemented by [SingleTokenInsertion].
|
||||
//
|
||||
// <p><strong>EXAMPLE</strong></p>
|
||||
// # Example
|
||||
//
|
||||
// <p>For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
|
||||
// the parser returns from the nested call to {@code expr}, it will have
|
||||
// call chain:</p>
|
||||
// For example, Input i=(3 is clearly missing the ')'. When
|
||||
// the parser returns from the nested call to expr, it will have
|
||||
// call the chain:
|
||||
//
|
||||
// <pre>
|
||||
// stat &rarr expr &rarr atom
|
||||
// </pre>
|
||||
// stat → expr → atom
|
||||
//
|
||||
// and it will be trying to Match the {@code ')'} at d point in the
|
||||
// and it will be trying to Match the ')' at this point in the
|
||||
// derivation:
|
||||
//
|
||||
// <pre>
|
||||
// => ID '=' '(' INT ')' ('+' atom)* ”
|
||||
// : ID '=' '(' INT ')' ('+' atom)* ';'
|
||||
// ^
|
||||
// </pre>
|
||||
//
|
||||
// The attempt to Match {@code ')'} will fail when it sees {@code ”} and
|
||||
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
|
||||
// is in the set of tokens that can follow the {@code ')'} token reference
|
||||
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
|
||||
// The attempt to [Match] ')' will fail when it sees ';' and
|
||||
// call [RecoverInline]. To recover, it sees that LA(1)==';'
|
||||
// is in the set of tokens that can follow the ')' token reference
|
||||
// in rule atom. It can assume that you forgot the ')'.
|
||||
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
|
||||
// SINGLE TOKEN DELETION
|
||||
MatchedSymbol := d.SingleTokenDeletion(recognizer)
|
||||
@@ -396,24 +362,24 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
|
||||
return d.GetMissingSymbol(recognizer)
|
||||
}
|
||||
// even that didn't work must panic the exception
|
||||
panic(NewInputMisMatchException(recognizer))
|
||||
recognizer.SetError(NewInputMisMatchException(recognizer))
|
||||
return nil
|
||||
}
|
||||
|
||||
// This method implements the single-token insertion inline error recovery
|
||||
// strategy. It is called by {@link //recoverInline} if the single-token
|
||||
// SingleTokenInsertion implements the single-token insertion inline error recovery
|
||||
// strategy. It is called by [RecoverInline] if the single-token
|
||||
// deletion strategy fails to recover from the mismatched input. If this
|
||||
// method returns {@code true}, {@code recognizer} will be in error recovery
|
||||
// mode.
|
||||
//
|
||||
// <p>This method determines whether or not single-token insertion is viable by
|
||||
// checking if the {@code LA(1)} input symbol could be successfully Matched
|
||||
// if it were instead the {@code LA(2)} symbol. If d method returns
|
||||
// This method determines whether single-token insertion is viable by
|
||||
// checking if the LA(1) input symbol could be successfully Matched
|
||||
// if it were instead the LA(2) symbol. If this method returns
|
||||
// {@code true}, the caller is responsible for creating and inserting a
|
||||
// token with the correct type to produce d behavior.</p>
|
||||
// token with the correct type to produce this behavior.</p>
|
||||
//
|
||||
// @param recognizer the parser instance
|
||||
// @return {@code true} if single-token insertion is a viable recovery
|
||||
// strategy for the current mismatched input, otherwise {@code false}
|
||||
// This func returns true if single-token insertion is a viable recovery
|
||||
// strategy for the current mismatched input.
|
||||
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
|
||||
currentSymbolType := recognizer.GetTokenStream().LA(1)
|
||||
// if current token is consistent with what could come after current
|
||||
@@ -431,23 +397,21 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// This method implements the single-token deletion inline error recovery
|
||||
// strategy. It is called by {@link //recoverInline} to attempt to recover
|
||||
// SingleTokenDeletion implements the single-token deletion inline error recovery
|
||||
// strategy. It is called by [RecoverInline] to attempt to recover
|
||||
// from mismatched input. If this method returns nil, the parser and error
|
||||
// handler state will not have changed. If this method returns non-nil,
|
||||
// {@code recognizer} will <em>not</em> be in error recovery mode since the
|
||||
// recognizer will not be in error recovery mode since the
|
||||
// returned token was a successful Match.
|
||||
//
|
||||
// <p>If the single-token deletion is successful, d method calls
|
||||
// {@link //ReportUnwantedToken} to Report the error, followed by
|
||||
// {@link Parser//consume} to actually "delete" the extraneous token. Then,
|
||||
// before returning {@link //ReportMatch} is called to signal a successful
|
||||
// Match.</p>
|
||||
// If the single-token deletion is successful, this method calls
|
||||
// [ReportUnwantedToken] to Report the error, followed by
|
||||
// [Consume] to actually “delete” the extraneous token. Then,
|
||||
// before returning, [ReportMatch] is called to signal a successful
|
||||
// Match.
|
||||
//
|
||||
// @param recognizer the parser instance
|
||||
// @return the successfully Matched {@link Token} instance if single-token
|
||||
// deletion successfully recovers from the mismatched input, otherwise
|
||||
// {@code nil}
|
||||
// The func returns the successfully Matched [Token] instance if single-token
|
||||
// deletion successfully recovers from the mismatched input, otherwise nil.
|
||||
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
|
||||
NextTokenType := recognizer.GetTokenStream().LA(2)
|
||||
expecting := d.GetExpectedTokens(recognizer)
|
||||
@@ -467,24 +431,28 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Conjure up a missing token during error recovery.
|
||||
// GetMissingSymbol conjures up a missing token during error recovery.
|
||||
//
|
||||
// The recognizer attempts to recover from single missing
|
||||
// symbols. But, actions might refer to that missing symbol.
|
||||
// For example, x=ID {f($x)}. The action clearly assumes
|
||||
// For example:
|
||||
//
|
||||
// x=ID {f($x)}.
|
||||
//
|
||||
// The action clearly assumes
|
||||
// that there has been an identifier Matched previously and that
|
||||
// $x points at that token. If that token is missing, but
|
||||
// the next token in the stream is what we want we assume that
|
||||
// d token is missing and we keep going. Because we
|
||||
// this token is missing, and we keep going. Because we
|
||||
// have to return some token to replace the missing token,
|
||||
// we have to conjure one up. This method gives the user control
|
||||
// over the tokens returned for missing tokens. Mostly,
|
||||
// you will want to create something special for identifier
|
||||
// tokens. For literals such as '{' and ',', the default
|
||||
// action in the parser or tree parser works. It simply creates
|
||||
// a CommonToken of the appropriate type. The text will be the token.
|
||||
// If you change what tokens must be created by the lexer,
|
||||
// override d method to create the appropriate tokens.
|
||||
// a [CommonToken] of the appropriate type. The text will be the token name.
|
||||
// If you need to change which tokens must be created by the lexer,
|
||||
// override this method to create the appropriate tokens.
|
||||
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
|
||||
currentSymbol := recognizer.GetCurrentToken()
|
||||
expecting := d.GetExpectedTokens(recognizer)
|
||||
@@ -498,7 +466,7 @@ func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
|
||||
if expectedTokenType > 0 && expectedTokenType < len(ln) {
|
||||
tokenText = "<missing " + recognizer.GetLiteralNames()[expectedTokenType] + ">"
|
||||
} else {
|
||||
tokenText = "<missing undefined>" // TODO matches the JS impl
|
||||
tokenText = "<missing undefined>" // TODO: matches the JS impl
|
||||
}
|
||||
}
|
||||
current := currentSymbol
|
||||
@@ -516,13 +484,13 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
|
||||
return recognizer.GetExpectedTokens()
|
||||
}
|
||||
|
||||
// How should a token be displayed in an error message? The default
|
||||
// is to display just the text, but during development you might
|
||||
// want to have a lot of information spit out. Override in that case
|
||||
// to use t.String() (which, for CommonToken, dumps everything about
|
||||
// GetTokenErrorDisplay determines how a token should be displayed in an error message.
|
||||
// The default is to display just the text, but during development you might
|
||||
// want to have a lot of information spit out. Override this func in that case
|
||||
// to use t.String() (which, for [CommonToken], dumps everything about
|
||||
// the token). This is better than forcing you to override a method in
|
||||
// your token objects because you don't have to go modify your lexer
|
||||
// so that it creates a NewJava type.
|
||||
// so that it creates a new type.
|
||||
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
|
||||
if t == nil {
|
||||
return "<no token>"
|
||||
@@ -545,33 +513,38 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
|
||||
return "'" + s + "'"
|
||||
}
|
||||
|
||||
// Compute the error recovery set for the current rule. During
|
||||
// GetErrorRecoverySet computes the error recovery set for the current rule. During
|
||||
// rule invocation, the parser pushes the set of tokens that can
|
||||
// follow that rule reference on the stack d amounts to
|
||||
// follow that rule reference on the stack. This amounts to
|
||||
// computing FIRST of what follows the rule reference in the
|
||||
// enclosing rule. See LinearApproximator.FIRST().
|
||||
//
|
||||
// This local follow set only includes tokens
|
||||
// from within the rule i.e., the FIRST computation done by
|
||||
// ANTLR stops at the end of a rule.
|
||||
//
|
||||
// # EXAMPLE
|
||||
// # Example
|
||||
//
|
||||
// When you find a "no viable alt exception", the input is not
|
||||
// consistent with any of the alternatives for rule r. The best
|
||||
// thing to do is to consume tokens until you see something that
|
||||
// can legally follow a call to r//or* any rule that called r.
|
||||
// can legally follow a call to r or any rule that called r.
|
||||
// You don't want the exact set of viable next tokens because the
|
||||
// input might just be missing a token--you might consume the
|
||||
// rest of the input looking for one of the missing tokens.
|
||||
//
|
||||
// Consider grammar:
|
||||
// Consider the grammar:
|
||||
//
|
||||
// a : '[' b ']'
|
||||
// | '(' b ')'
|
||||
// ;
|
||||
//
|
||||
// b : c '^' INT
|
||||
// ;
|
||||
//
|
||||
// c : ID
|
||||
// | INT
|
||||
// ;
|
||||
//
|
||||
// At each rule invocation, the set of tokens that could follow
|
||||
// that rule is pushed on a stack. Here are the various
|
||||
@@ -581,13 +554,13 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
|
||||
// FOLLOW(b2_in_a) = FIRST(')') = ')'
|
||||
// FOLLOW(c_in_b) = FIRST('^') = '^'
|
||||
//
|
||||
// Upon erroneous input "[]", the call chain is
|
||||
// Upon erroneous input “[]”, the call chain is
|
||||
//
|
||||
// a -> b -> c
|
||||
// a → b → c
|
||||
//
|
||||
// and, hence, the follow context stack is:
|
||||
//
|
||||
// depth follow set start of rule execution
|
||||
// Depth Follow set Start of rule execution
|
||||
// 0 <EOF> a (from main())
|
||||
// 1 ']' b
|
||||
// 2 '^' c
|
||||
@@ -598,11 +571,14 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
|
||||
//
|
||||
// For error recovery, we cannot consider FOLLOW(c)
|
||||
// (context-sensitive or otherwise). We need the combined set of
|
||||
// all context-sensitive FOLLOW sets--the set of all tokens that
|
||||
// all context-sensitive FOLLOW sets - the set of all tokens that
|
||||
// could follow any reference in the call chain. We need to
|
||||
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
|
||||
// we reSync'd to that token, we'd consume until EOF. We need to
|
||||
// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
|
||||
// Sync to context-sensitive FOLLOWs for a, b, and c:
|
||||
//
|
||||
// {']','^'}
|
||||
//
|
||||
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
|
||||
// not consume anything. After printing an error, rule c would
|
||||
// return normally. Rule b would not find the required '^' though.
|
||||
@@ -620,22 +596,19 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
|
||||
//
|
||||
// ANTLR's error recovery mechanism is based upon original ideas:
|
||||
//
|
||||
// "Algorithms + Data Structures = Programs" by Niklaus Wirth
|
||||
// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
|
||||
// [A note on error recovery in recursive descent parsers].
|
||||
//
|
||||
// and
|
||||
// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
|
||||
// Parsers]
|
||||
//
|
||||
// "A note on error recovery in recursive descent parsers":
|
||||
// http://portal.acm.org/citation.cfm?id=947902.947905
|
||||
// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
|
||||
// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs
|
||||
//
|
||||
// Later, Josef Grosch had some good ideas:
|
||||
//
|
||||
// "Efficient and Comfortable Error Recovery in Recursive Descent
|
||||
// Parsers":
|
||||
// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
|
||||
//
|
||||
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
|
||||
// at run-time upon error to avoid overhead during parsing.
|
||||
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
|
||||
// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
|
||||
// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
|
||||
// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
|
||||
func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
|
||||
atn := recognizer.GetInterpreter().atn
|
||||
ctx := recognizer.GetParserRuleContext()
|
||||
recoverSet := NewIntervalSet()
|
||||
@@ -660,40 +633,36 @@ func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
|
||||
// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
|
||||
// by immediately canceling the parse operation with a
|
||||
// {@link ParseCancellationException}. The implementation ensures that the
|
||||
// {@link ParserRuleContext//exception} field is set for all parse tree nodes
|
||||
// [ParseCancellationException]. The implementation ensures that the
|
||||
// [ParserRuleContext//exception] field is set for all parse tree nodes
|
||||
// that were not completed prior to encountering the error.
|
||||
//
|
||||
// <p>
|
||||
// This error strategy is useful in the following scenarios.</p>
|
||||
// This error strategy is useful in the following scenarios.
|
||||
//
|
||||
// <ul>
|
||||
// <li><strong>Two-stage parsing:</strong> This error strategy allows the first
|
||||
// - Two-stage parsing: This error strategy allows the first
|
||||
// stage of two-stage parsing to immediately terminate if an error is
|
||||
// encountered, and immediately fall back to the second stage. In addition to
|
||||
// avoiding wasted work by attempting to recover from errors here, the empty
|
||||
// implementation of {@link BailErrorStrategy//Sync} improves the performance of
|
||||
// the first stage.</li>
|
||||
// <li><strong>Silent validation:</strong> When syntax errors are not being
|
||||
// implementation of [BailErrorStrategy.Sync] improves the performance of
|
||||
// the first stage.
|
||||
//
|
||||
// - Silent validation: When syntax errors are not being
|
||||
// Reported or logged, and the parse result is simply ignored if errors occur,
|
||||
// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
|
||||
// when the result will be ignored either way.</li>
|
||||
// </ul>
|
||||
// the [BailErrorStrategy] avoids wasting work on recovering from errors
|
||||
// when the result will be ignored either way.
|
||||
//
|
||||
// <p>
|
||||
// {@code myparser.setErrorHandler(NewBailErrorStrategy())}</p>
|
||||
// myparser.SetErrorHandler(NewBailErrorStrategy())
|
||||
//
|
||||
// @see Parser//setErrorHandler(ANTLRErrorStrategy)
|
||||
|
||||
// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)]
|
||||
type BailErrorStrategy struct {
|
||||
*DefaultErrorStrategy
|
||||
}
|
||||
|
||||
var _ ErrorStrategy = &BailErrorStrategy{}
|
||||
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func NewBailErrorStrategy() *BailErrorStrategy {
|
||||
|
||||
b := new(BailErrorStrategy)
|
||||
@@ -703,10 +672,10 @@ func NewBailErrorStrategy() *BailErrorStrategy {
|
||||
return b
|
||||
}
|
||||
|
||||
// Instead of recovering from exception {@code e}, re-panic it wrapped
|
||||
// in a {@link ParseCancellationException} so it is not caught by the
|
||||
// rule func catches. Use {@link Exception//getCause()} to get the
|
||||
// original {@link RecognitionException}.
|
||||
// Recover Instead of recovering from exception e, re-panic it wrapped
|
||||
// in a [ParseCancellationException] so it is not caught by the
|
||||
// rule func catches. Use Exception.GetCause() to get the
|
||||
// original [RecognitionException].
|
||||
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
|
||||
context := recognizer.GetParserRuleContext()
|
||||
for context != nil {
|
||||
@@ -717,10 +686,10 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
|
||||
context = nil
|
||||
}
|
||||
}
|
||||
panic(NewParseCancellationException()) // TODO we don't emit e properly
|
||||
recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
|
||||
}
|
||||
|
||||
// Make sure we don't attempt to recover inline if the parser
|
||||
// RecoverInline makes sure we don't attempt to recover inline if the parser
|
||||
// successfully recovers, it won't panic an exception.
|
||||
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
|
||||
b.Recover(recognizer, NewInputMisMatchException(recognizer))
|
||||
@@ -728,7 +697,6 @@ func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Make sure we don't attempt to recover from problems in subrules.//
|
||||
func (b *BailErrorStrategy) Sync(recognizer Parser) {
|
||||
// pass
|
||||
// Sync makes sure we don't attempt to recover from problems in sub-rules.
|
||||
func (b *BailErrorStrategy) Sync(_ Parser) {
|
||||
}
|
@@ -35,7 +35,7 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In
|
||||
// } else {
|
||||
// stack := NewError().stack
|
||||
// }
|
||||
// TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
|
||||
// TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int
|
||||
|
||||
t := new(BaseRecognitionException)
|
||||
|
||||
@@ -43,15 +43,17 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In
|
||||
t.recognizer = recognizer
|
||||
t.input = input
|
||||
t.ctx = ctx
|
||||
// The current {@link Token} when an error occurred. Since not all streams
|
||||
|
||||
// The current Token when an error occurred. Since not all streams
|
||||
// support accessing symbols by index, we have to track the {@link Token}
|
||||
// instance itself.
|
||||
//
|
||||
t.offendingToken = nil
|
||||
|
||||
// Get the ATN state number the parser was in at the time the error
|
||||
// occurred. For {@link NoViableAltException} and
|
||||
// {@link LexerNoViableAltException} exceptions, this is the
|
||||
// {@link DecisionState} number. For others, it is the state whose outgoing
|
||||
// edge we couldn't Match.
|
||||
// occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
|
||||
// DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
|
||||
//
|
||||
t.offendingState = -1
|
||||
if t.recognizer != nil {
|
||||
t.offendingState = t.recognizer.GetState()
|
||||
@@ -74,15 +76,15 @@ func (b *BaseRecognitionException) GetInputStream() IntStream {
|
||||
|
||||
// <p>If the state number is not known, b method returns -1.</p>
|
||||
|
||||
// Gets the set of input symbols which could potentially follow the
|
||||
// previously Matched symbol at the time b exception was panicn.
|
||||
// getExpectedTokens gets the set of input symbols which could potentially follow the
|
||||
// previously Matched symbol at the time this exception was raised.
|
||||
//
|
||||
// <p>If the set of expected tokens is not known and could not be computed,
|
||||
// b method returns {@code nil}.</p>
|
||||
// If the set of expected tokens is not known and could not be computed,
|
||||
// this method returns nil.
|
||||
//
|
||||
// @return The set of token types that could potentially follow the current
|
||||
// state in the ATN, or {@code nil} if the information is not available.
|
||||
// /
|
||||
// The func returns the set of token types that could potentially follow the current
|
||||
// state in the {ATN}, or nil if the information is not available.
|
||||
|
||||
func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
|
||||
if b.recognizer != nil {
|
||||
return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
|
||||
@@ -99,10 +101,10 @@ type LexerNoViableAltException struct {
|
||||
*BaseRecognitionException
|
||||
|
||||
startIndex int
|
||||
deadEndConfigs ATNConfigSet
|
||||
deadEndConfigs *ATNConfigSet
|
||||
}
|
||||
|
||||
func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
|
||||
func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
|
||||
|
||||
l := new(LexerNoViableAltException)
|
||||
|
||||
@@ -128,14 +130,16 @@ type NoViableAltException struct {
|
||||
startToken Token
|
||||
offendingToken Token
|
||||
ctx ParserRuleContext
|
||||
deadEndConfigs ATNConfigSet
|
||||
deadEndConfigs *ATNConfigSet
|
||||
}
|
||||
|
||||
// Indicates that the parser could not decide which of two or more paths
|
||||
// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
|
||||
// to take based upon the remaining input. It tracks the starting token
|
||||
// of the offending input and also knows where the parser was
|
||||
// in the various paths when the error. Reported by ReportNoViableAlternative()
|
||||
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
|
||||
// in the various paths when the error.
|
||||
//
|
||||
// Reported by [ReportNoViableAlternative]
|
||||
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
|
||||
|
||||
if ctx == nil {
|
||||
ctx = recognizer.GetParserRuleContext()
|
||||
@@ -157,12 +161,14 @@ func NewNoViableAltException(recognizer Parser, input TokenStream, startToken To
|
||||
n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
|
||||
|
||||
// Which configurations did we try at input.Index() that couldn't Match
|
||||
// input.LT(1)?//
|
||||
// input.LT(1)
|
||||
n.deadEndConfigs = deadEndConfigs
|
||||
|
||||
// The token object at the start index the input stream might
|
||||
// not be buffering tokens so get a reference to it. (At the
|
||||
// time the error occurred, of course the stream needs to keep a
|
||||
// buffer all of the tokens but later we might not have access to those.)
|
||||
// not be buffering tokens so get a reference to it.
|
||||
//
|
||||
// At the time the error occurred, of course the stream needs to keep a
|
||||
// buffer of all the tokens, but later we might not have access to those.
|
||||
n.startToken = startToken
|
||||
n.offendingToken = offendingToken
|
||||
|
||||
@@ -173,7 +179,7 @@ type InputMisMatchException struct {
|
||||
*BaseRecognitionException
|
||||
}
|
||||
|
||||
// This signifies any kind of mismatched input exceptions such as
|
||||
// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as
|
||||
// when the current input does not Match the expected token.
|
||||
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
|
||||
|
||||
@@ -186,11 +192,10 @@ func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
|
||||
|
||||
}
|
||||
|
||||
// A semantic predicate failed during validation. Validation of predicates
|
||||
// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
|
||||
// occurs when normally parsing the alternative just like Matching a token.
|
||||
// Disambiguating predicate evaluation occurs when we test a predicate during
|
||||
// prediction.
|
||||
|
||||
type FailedPredicateException struct {
|
||||
*BaseRecognitionException
|
||||
|
||||
@@ -199,6 +204,7 @@ type FailedPredicateException struct {
|
||||
predicate string
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
|
||||
|
||||
f := new(FailedPredicateException)
|
||||
@@ -231,6 +237,21 @@ func (f *FailedPredicateException) formatMessage(predicate, message string) stri
|
||||
type ParseCancellationException struct {
|
||||
}
|
||||
|
||||
func (p ParseCancellationException) GetOffendingToken() Token {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (p ParseCancellationException) GetMessage() string {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (p ParseCancellationException) GetInputStream() IntStream {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func NewParseCancellationException() *ParseCancellationException {
|
||||
// Error.call(this)
|
||||
// Error.captureStackTrace(this, ParseCancellationException)
|
@@ -5,8 +5,7 @@
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"bufio"
|
||||
"os"
|
||||
)
|
||||
|
||||
@@ -14,34 +13,53 @@ import (
|
||||
// when you construct the object.
|
||||
|
||||
type FileStream struct {
|
||||
*InputStream
|
||||
|
||||
InputStream
|
||||
filename string
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func NewFileStream(fileName string) (*FileStream, error) {
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
f, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.Copy(buf, f)
|
||||
|
||||
defer func(f *os.File) {
|
||||
errF := f.Close()
|
||||
if errF != nil {
|
||||
}
|
||||
}(f)
|
||||
|
||||
reader := bufio.NewReader(f)
|
||||
fInfo, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs := new(FileStream)
|
||||
fs := &FileStream{
|
||||
InputStream: InputStream{
|
||||
index: 0,
|
||||
name: fileName,
|
||||
},
|
||||
filename: fileName,
|
||||
}
|
||||
|
||||
fs.filename = fileName
|
||||
s := string(buf.Bytes())
|
||||
|
||||
fs.InputStream = NewInputStream(s)
|
||||
// Pre-build the buffer and read runes efficiently
|
||||
//
|
||||
fs.data = make([]rune, 0, fInfo.Size())
|
||||
for {
|
||||
r, _, err := reader.ReadRune()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
fs.data = append(fs.data, r)
|
||||
}
|
||||
fs.size = len(fs.data) // Size in runes
|
||||
|
||||
// All done.
|
||||
//
|
||||
return fs, nil
|
||||
|
||||
}
|
||||
|
||||
func (f *FileStream) GetSourceName() string {
|
157
vendor/github.com/antlr4-go/antlr/v4/input_stream.go
generated
vendored
Normal file
157
vendor/github.com/antlr4-go/antlr/v4/input_stream.go
generated
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
)
|
||||
|
||||
type InputStream struct {
|
||||
name string
|
||||
index int
|
||||
data []rune
|
||||
size int
|
||||
}
|
||||
|
||||
// NewIoStream creates a new input stream from the given io.Reader reader.
|
||||
// Note that the reader is read completely into memory and so it must actually
|
||||
// have a stopping point - you cannot pass in a reader on an open-ended source such
|
||||
// as a socket for instance.
|
||||
func NewIoStream(reader io.Reader) *InputStream {
|
||||
|
||||
rReader := bufio.NewReader(reader)
|
||||
|
||||
is := &InputStream{
|
||||
name: "<empty>",
|
||||
index: 0,
|
||||
}
|
||||
|
||||
// Pre-build the buffer and read runes reasonably efficiently given that
|
||||
// we don't exactly know how big the input is.
|
||||
//
|
||||
is.data = make([]rune, 0, 512)
|
||||
for {
|
||||
r, _, err := rReader.ReadRune()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
is.data = append(is.data, r)
|
||||
}
|
||||
is.size = len(is.data) // number of runes
|
||||
return is
|
||||
}
|
||||
|
||||
// NewInputStream creates a new input stream from the given string
|
||||
func NewInputStream(data string) *InputStream {
|
||||
|
||||
is := &InputStream{
|
||||
name: "<empty>",
|
||||
index: 0,
|
||||
data: []rune(data), // This is actually the most efficient way
|
||||
}
|
||||
is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too
|
||||
return is
|
||||
}
|
||||
|
||||
func (is *InputStream) reset() {
|
||||
is.index = 0
|
||||
}
|
||||
|
||||
// Consume moves the input pointer to the next character in the input stream
|
||||
func (is *InputStream) Consume() {
|
||||
if is.index >= is.size {
|
||||
// assert is.LA(1) == TokenEOF
|
||||
panic("cannot consume EOF")
|
||||
}
|
||||
is.index++
|
||||
}
|
||||
|
||||
// LA returns the character at the given offset from the start of the input stream
|
||||
func (is *InputStream) LA(offset int) int {
|
||||
|
||||
if offset == 0 {
|
||||
return 0 // nil
|
||||
}
|
||||
if offset < 0 {
|
||||
offset++ // e.g., translate LA(-1) to use offset=0
|
||||
}
|
||||
pos := is.index + offset - 1
|
||||
|
||||
if pos < 0 || pos >= is.size { // invalid
|
||||
return TokenEOF
|
||||
}
|
||||
|
||||
return int(is.data[pos])
|
||||
}
|
||||
|
||||
// LT returns the character at the given offset from the start of the input stream
|
||||
func (is *InputStream) LT(offset int) int {
|
||||
return is.LA(offset)
|
||||
}
|
||||
|
||||
// Index returns the current offset in to the input stream
|
||||
func (is *InputStream) Index() int {
|
||||
return is.index
|
||||
}
|
||||
|
||||
// Size returns the total number of characters in the input stream
|
||||
func (is *InputStream) Size() int {
|
||||
return is.size
|
||||
}
|
||||
|
||||
// Mark does nothing here as we have entire buffer
|
||||
func (is *InputStream) Mark() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Release does nothing here as we have entire buffer
|
||||
func (is *InputStream) Release(_ int) {
|
||||
}
|
||||
|
||||
// Seek the input point to the provided index offset
|
||||
func (is *InputStream) Seek(index int) {
|
||||
if index <= is.index {
|
||||
is.index = index // just jump don't update stream state (line,...)
|
||||
return
|
||||
}
|
||||
// seek forward
|
||||
is.index = intMin(index, is.size)
|
||||
}
|
||||
|
||||
// GetText returns the text from the input stream from the start to the stop index
|
||||
func (is *InputStream) GetText(start int, stop int) string {
|
||||
if stop >= is.size {
|
||||
stop = is.size - 1
|
||||
}
|
||||
if start >= is.size {
|
||||
return ""
|
||||
}
|
||||
|
||||
return string(is.data[start : stop+1])
|
||||
}
|
||||
|
||||
// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
|
||||
// character of the stop token
|
||||
func (is *InputStream) GetTextFromTokens(start, stop Token) string {
|
||||
if start != nil && stop != nil {
|
||||
return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (is *InputStream) GetTextFromInterval(i Interval) string {
|
||||
return is.GetText(i.Start, i.Stop)
|
||||
}
|
||||
|
||||
func (*InputStream) GetSourceName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// String returns the entire input stream as a string
|
||||
func (is *InputStream) String() string {
|
||||
return string(is.data)
|
||||
}
|
@@ -14,20 +14,21 @@ type Interval struct {
|
||||
Stop int
|
||||
}
|
||||
|
||||
/* stop is not included! */
|
||||
func NewInterval(start, stop int) *Interval {
|
||||
i := new(Interval)
|
||||
|
||||
i.Start = start
|
||||
i.Stop = stop
|
||||
return i
|
||||
// NewInterval creates a new interval with the given start and stop values.
|
||||
func NewInterval(start, stop int) Interval {
|
||||
return Interval{
|
||||
Start: start,
|
||||
Stop: stop,
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Interval) Contains(item int) bool {
|
||||
// Contains returns true if the given item is contained within the interval.
|
||||
func (i Interval) Contains(item int) bool {
|
||||
return item >= i.Start && item < i.Stop
|
||||
}
|
||||
|
||||
func (i *Interval) String() string {
|
||||
// String generates a string representation of the interval.
|
||||
func (i Interval) String() string {
|
||||
if i.Start == i.Stop-1 {
|
||||
return strconv.Itoa(i.Start)
|
||||
}
|
||||
@@ -35,15 +36,18 @@ func (i *Interval) String() string {
|
||||
return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
|
||||
}
|
||||
|
||||
func (i *Interval) length() int {
|
||||
// Length returns the length of the interval.
|
||||
func (i Interval) Length() int {
|
||||
return i.Stop - i.Start
|
||||
}
|
||||
|
||||
// IntervalSet represents a collection of [Intervals], which may be read-only.
|
||||
type IntervalSet struct {
|
||||
intervals []*Interval
|
||||
intervals []Interval
|
||||
readOnly bool
|
||||
}
|
||||
|
||||
// NewIntervalSet creates a new empty, writable, interval set.
|
||||
func NewIntervalSet() *IntervalSet {
|
||||
|
||||
i := new(IntervalSet)
|
||||
@@ -54,6 +58,20 @@ func NewIntervalSet() *IntervalSet {
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IntervalSet) Equals(other *IntervalSet) bool {
|
||||
if len(i.intervals) != len(other.intervals) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, v := range i.intervals {
|
||||
if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (i *IntervalSet) first() int {
|
||||
if len(i.intervals) == 0 {
|
||||
return TokenInvalidType
|
||||
@@ -70,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) {
|
||||
i.addInterval(NewInterval(l, h+1))
|
||||
}
|
||||
|
||||
func (i *IntervalSet) addInterval(v *Interval) {
|
||||
func (i *IntervalSet) addInterval(v Interval) {
|
||||
if i.intervals == nil {
|
||||
i.intervals = make([]*Interval, 0)
|
||||
i.intervals = make([]Interval, 0)
|
||||
i.intervals = append(i.intervals, v)
|
||||
} else {
|
||||
// find insert pos
|
||||
for k, interval := range i.intervals {
|
||||
// distinct range -> insert
|
||||
if v.Stop < interval.Start {
|
||||
i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
|
||||
i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
|
||||
return
|
||||
} else if v.Stop == interval.Start {
|
||||
i.intervals[k].Start = v.Start
|
||||
@@ -139,16 +157,16 @@ func (i *IntervalSet) contains(item int) bool {
|
||||
}
|
||||
|
||||
func (i *IntervalSet) length() int {
|
||||
len := 0
|
||||
iLen := 0
|
||||
|
||||
for _, v := range i.intervals {
|
||||
len += v.length()
|
||||
iLen += v.Length()
|
||||
}
|
||||
|
||||
return len
|
||||
return iLen
|
||||
}
|
||||
|
||||
func (i *IntervalSet) removeRange(v *Interval) {
|
||||
func (i *IntervalSet) removeRange(v Interval) {
|
||||
if v.Start == v.Stop-1 {
|
||||
i.removeOne(v.Start)
|
||||
} else if i.intervals != nil {
|
||||
@@ -162,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) {
|
||||
i.intervals[k] = NewInterval(ni.Start, v.Start)
|
||||
x := NewInterval(v.Stop, ni.Stop)
|
||||
// i.intervals.splice(k, 0, x)
|
||||
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
|
||||
i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
|
||||
return
|
||||
} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
|
||||
// i.intervals.splice(k, 1)
|
||||
@@ -199,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) {
|
||||
x := NewInterval(ki.Start, v)
|
||||
ki.Start = v + 1
|
||||
// i.intervals.splice(k, 0, x)
|
||||
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
|
||||
i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -223,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
|
||||
return i.toIndexString()
|
||||
}
|
||||
|
||||
func (i *IntervalSet) GetIntervals() []*Interval {
|
||||
func (i *IntervalSet) GetIntervals() []Interval {
|
||||
return i.intervals
|
||||
}
|
||||
|
685
vendor/github.com/antlr4-go/antlr/v4/jcollect.go
generated
vendored
Normal file
685
vendor/github.com/antlr4-go/antlr/v4/jcollect.go
generated
vendored
Normal file
@@ -0,0 +1,685 @@
|
||||
package antlr
|
||||
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Collectable is an interface that a struct should implement if it is to be
|
||||
// usable as a key in these collections.
|
||||
type Collectable[T any] interface {
|
||||
Hash() int
|
||||
Equals(other Collectable[T]) bool
|
||||
}
|
||||
|
||||
type Comparator[T any] interface {
|
||||
Hash1(o T) int
|
||||
Equals2(T, T) bool
|
||||
}
|
||||
|
||||
type CollectionSource int
|
||||
type CollectionDescriptor struct {
|
||||
SybolicName string
|
||||
Description string
|
||||
}
|
||||
|
||||
const (
|
||||
UnknownCollection CollectionSource = iota
|
||||
ATNConfigLookupCollection
|
||||
ATNStateCollection
|
||||
DFAStateCollection
|
||||
ATNConfigCollection
|
||||
PredictionContextCollection
|
||||
SemanticContextCollection
|
||||
ClosureBusyCollection
|
||||
PredictionVisitedCollection
|
||||
MergeCacheCollection
|
||||
PredictionContextCacheCollection
|
||||
AltSetCollection
|
||||
ReachSetCollection
|
||||
)
|
||||
|
||||
var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
|
||||
UnknownCollection: {
|
||||
SybolicName: "UnknownCollection",
|
||||
Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
|
||||
},
|
||||
ATNConfigCollection: {
|
||||
SybolicName: "ATNConfigCollection",
|
||||
Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
|
||||
"For instance, it is used to store the results of the closure() operation in the ATN.",
|
||||
},
|
||||
ATNConfigLookupCollection: {
|
||||
SybolicName: "ATNConfigLookupCollection",
|
||||
Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
|
||||
"This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
|
||||
},
|
||||
ATNStateCollection: {
|
||||
SybolicName: "ATNStateCollection",
|
||||
Description: "ATNState collection. This is used to store the states of the ATN.",
|
||||
},
|
||||
DFAStateCollection: {
|
||||
SybolicName: "DFAStateCollection",
|
||||
Description: "DFAState collection. This is used to store the states of the DFA.",
|
||||
},
|
||||
PredictionContextCollection: {
|
||||
SybolicName: "PredictionContextCollection",
|
||||
Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
|
||||
},
|
||||
SemanticContextCollection: {
|
||||
SybolicName: "SemanticContextCollection",
|
||||
Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
|
||||
},
|
||||
ClosureBusyCollection: {
|
||||
SybolicName: "ClosureBusyCollection",
|
||||
Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." +
|
||||
"It stores ATNConfigs that are currently being processed in the closure() operation.",
|
||||
},
|
||||
PredictionVisitedCollection: {
|
||||
SybolicName: "PredictionVisitedCollection",
|
||||
Description: "A map that records whether we have visited a particular context when searching through cached entries.",
|
||||
},
|
||||
MergeCacheCollection: {
|
||||
SybolicName: "MergeCacheCollection",
|
||||
Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
|
||||
},
|
||||
PredictionContextCacheCollection: {
|
||||
SybolicName: "PredictionContextCacheCollection",
|
||||
Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
|
||||
},
|
||||
AltSetCollection: {
|
||||
SybolicName: "AltSetCollection",
|
||||
Description: "Used to eliminate duplicate alternatives in an ATN config set.",
|
||||
},
|
||||
ReachSetCollection: {
|
||||
SybolicName: "ReachSetCollection",
|
||||
Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
|
||||
},
|
||||
}
|
||||
|
||||
// JStore implements a container that allows the use of a struct to calculate the key
|
||||
// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
|
||||
// serve the needs of the ANTLR Go runtime.
|
||||
//
|
||||
// For ease of porting the logic of the runtime from the master target (Java), this collection
|
||||
// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
|
||||
// function as the key. The values are stored in a standard go map which internally is a form of hashmap
|
||||
// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
|
||||
// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
|
||||
// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
|
||||
// we understand the requirements, then this is fine - this is not a general purpose collection.
|
||||
type JStore[T any, C Comparator[T]] struct {
|
||||
store map[int][]T
|
||||
len int
|
||||
comparator Comparator[T]
|
||||
stats *JStatRec
|
||||
}
|
||||
|
||||
func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {
|
||||
|
||||
if comparator == nil {
|
||||
panic("comparator cannot be nil")
|
||||
}
|
||||
|
||||
s := &JStore[T, C]{
|
||||
store: make(map[int][]T, 1),
|
||||
comparator: comparator,
|
||||
}
|
||||
if collectStats {
|
||||
s.stats = &JStatRec{
|
||||
Source: cType,
|
||||
Description: desc,
|
||||
}
|
||||
|
||||
// Track where we created it from if we are being asked to do so
|
||||
if runtimeConfig.statsTraceStacks {
|
||||
s.stats.CreateStack = debug.Stack()
|
||||
}
|
||||
Statistics.AddJStatRec(s.stats)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Put will store given value in the collection. Note that the key for storage is generated from
|
||||
// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
|
||||
// as any kind of general collection.
|
||||
//
|
||||
// If the key has a hash conflict, then the value will be added to the slice of values associated with the
|
||||
// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
|
||||
// tested by calling the equals() method on the key.
|
||||
//
|
||||
// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
|
||||
//
|
||||
// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
|
||||
func (s *JStore[T, C]) Put(value T) (v T, exists bool) {
|
||||
|
||||
if collectStats {
|
||||
s.stats.Puts++
|
||||
}
|
||||
kh := s.comparator.Hash1(value)
|
||||
|
||||
var hClash bool
|
||||
for _, v1 := range s.store[kh] {
|
||||
hClash = true
|
||||
if s.comparator.Equals2(value, v1) {
|
||||
if collectStats {
|
||||
s.stats.PutHits++
|
||||
s.stats.PutHashConflicts++
|
||||
}
|
||||
return v1, true
|
||||
}
|
||||
if collectStats {
|
||||
s.stats.PutMisses++
|
||||
}
|
||||
}
|
||||
if collectStats && hClash {
|
||||
s.stats.PutHashConflicts++
|
||||
}
|
||||
s.store[kh] = append(s.store[kh], value)
|
||||
|
||||
if collectStats {
|
||||
if len(s.store[kh]) > s.stats.MaxSlotSize {
|
||||
s.stats.MaxSlotSize = len(s.store[kh])
|
||||
}
|
||||
}
|
||||
s.len++
|
||||
if collectStats {
|
||||
s.stats.CurSize = s.len
|
||||
if s.len > s.stats.MaxSize {
|
||||
s.stats.MaxSize = s.len
|
||||
}
|
||||
}
|
||||
return value, false
|
||||
}
|
||||
|
||||
// Get will return the value associated with the key - the type of the key is the same type as the value
|
||||
// which would not generally be useful, but this is a specific thing for ANTLR where the key is
|
||||
// generated using the object we are going to store.
|
||||
func (s *JStore[T, C]) Get(key T) (T, bool) {
|
||||
if collectStats {
|
||||
s.stats.Gets++
|
||||
}
|
||||
kh := s.comparator.Hash1(key)
|
||||
var hClash bool
|
||||
for _, v := range s.store[kh] {
|
||||
hClash = true
|
||||
if s.comparator.Equals2(key, v) {
|
||||
if collectStats {
|
||||
s.stats.GetHits++
|
||||
s.stats.GetHashConflicts++
|
||||
}
|
||||
return v, true
|
||||
}
|
||||
if collectStats {
|
||||
s.stats.GetMisses++
|
||||
}
|
||||
}
|
||||
if collectStats {
|
||||
if hClash {
|
||||
s.stats.GetHashConflicts++
|
||||
}
|
||||
s.stats.GetNoEnt++
|
||||
}
|
||||
return key, false
|
||||
}
|
||||
|
||||
// Contains returns true if the given key is present in the store
|
||||
func (s *JStore[T, C]) Contains(key T) bool {
|
||||
_, present := s.Get(key)
|
||||
return present
|
||||
}
|
||||
|
||||
func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
|
||||
vs := make([]T, 0, len(s.store))
|
||||
for _, v := range s.store {
|
||||
vs = append(vs, v...)
|
||||
}
|
||||
sort.Slice(vs, func(i, j int) bool {
|
||||
return less(vs[i], vs[j])
|
||||
})
|
||||
|
||||
return vs
|
||||
}
|
||||
|
||||
func (s *JStore[T, C]) Each(f func(T) bool) {
|
||||
for _, e := range s.store {
|
||||
for _, v := range e {
|
||||
f(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *JStore[T, C]) Len() int {
|
||||
return s.len
|
||||
}
|
||||
|
||||
func (s *JStore[T, C]) Values() []T {
|
||||
vs := make([]T, 0, len(s.store))
|
||||
for _, e := range s.store {
|
||||
vs = append(vs, e...)
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
type entry[K, V any] struct {
|
||||
key K
|
||||
val V
|
||||
}
|
||||
|
||||
type JMap[K, V any, C Comparator[K]] struct {
|
||||
store map[int][]*entry[K, V]
|
||||
len int
|
||||
comparator Comparator[K]
|
||||
stats *JStatRec
|
||||
}
|
||||
|
||||
func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
|
||||
m := &JMap[K, V, C]{
|
||||
store: make(map[int][]*entry[K, V], 1),
|
||||
comparator: comparator,
|
||||
}
|
||||
if collectStats {
|
||||
m.stats = &JStatRec{
|
||||
Source: cType,
|
||||
Description: desc,
|
||||
}
|
||||
// Track where we created it from if we are being asked to do so
|
||||
if runtimeConfig.statsTraceStacks {
|
||||
m.stats.CreateStack = debug.Stack()
|
||||
}
|
||||
Statistics.AddJStatRec(m.stats)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
|
||||
if collectStats {
|
||||
m.stats.Puts++
|
||||
}
|
||||
kh := m.comparator.Hash1(key)
|
||||
|
||||
var hClash bool
|
||||
for _, e := range m.store[kh] {
|
||||
hClash = true
|
||||
if m.comparator.Equals2(e.key, key) {
|
||||
if collectStats {
|
||||
m.stats.PutHits++
|
||||
m.stats.PutHashConflicts++
|
||||
}
|
||||
return e.val, true
|
||||
}
|
||||
if collectStats {
|
||||
m.stats.PutMisses++
|
||||
}
|
||||
}
|
||||
if collectStats {
|
||||
if hClash {
|
||||
m.stats.PutHashConflicts++
|
||||
}
|
||||
}
|
||||
m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
|
||||
if collectStats {
|
||||
if len(m.store[kh]) > m.stats.MaxSlotSize {
|
||||
m.stats.MaxSlotSize = len(m.store[kh])
|
||||
}
|
||||
}
|
||||
m.len++
|
||||
if collectStats {
|
||||
m.stats.CurSize = m.len
|
||||
if m.len > m.stats.MaxSize {
|
||||
m.stats.MaxSize = m.len
|
||||
}
|
||||
}
|
||||
return val, false
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Values() []V {
|
||||
vs := make([]V, 0, len(m.store))
|
||||
for _, e := range m.store {
|
||||
for _, v := range e {
|
||||
vs = append(vs, v.val)
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Get(key K) (V, bool) {
|
||||
if collectStats {
|
||||
m.stats.Gets++
|
||||
}
|
||||
var none V
|
||||
kh := m.comparator.Hash1(key)
|
||||
var hClash bool
|
||||
for _, e := range m.store[kh] {
|
||||
hClash = true
|
||||
if m.comparator.Equals2(e.key, key) {
|
||||
if collectStats {
|
||||
m.stats.GetHits++
|
||||
m.stats.GetHashConflicts++
|
||||
}
|
||||
return e.val, true
|
||||
}
|
||||
if collectStats {
|
||||
m.stats.GetMisses++
|
||||
}
|
||||
}
|
||||
if collectStats {
|
||||
if hClash {
|
||||
m.stats.GetHashConflicts++
|
||||
}
|
||||
m.stats.GetNoEnt++
|
||||
}
|
||||
return none, false
|
||||
}
|
||||
|
||||
// Len returns the number of entries currently stored in the map.
func (m *JMap[K, V, C]) Len() int {
	return m.len
}
|
||||
|
||||
func (m *JMap[K, V, C]) Delete(key K) {
|
||||
kh := m.comparator.Hash1(key)
|
||||
for i, e := range m.store[kh] {
|
||||
if m.comparator.Equals2(e.key, key) {
|
||||
m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
|
||||
m.len--
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *JMap[K, V, C]) Clear() {
|
||||
m.store = make(map[int][]*entry[K, V])
|
||||
}
|
||||
|
||||
// JPCMap maps an ordered pair of PredictionContext keys to a PredictionContext
// value. It is built as a two-level map: the outer JMap is keyed by the first
// context and holds an inner JMap keyed by the second context.
type JPCMap struct {
	store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
	size  int      // total number of (k1, k2) pairs stored across all inner maps
	stats *JStatRec // collection statistics; only populated when collectStats is enabled
}
|
||||
|
||||
// NewJPCMap creates an empty two-level map of prediction-context pairs. When
// statistics collection is compiled in, a JStatRec tagged with the given
// collection source and description is created and registered globally.
func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
	m := &JPCMap{
		store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
	}
	if collectStats {
		m.stats = &JStatRec{
			Source:      cType,
			Description: desc,
		}
		// Track where we created it from if we are being asked to do so
		if runtimeConfig.statsTraceStacks {
			m.stats.CreateStack = debug.Stack()
		}
		Statistics.AddJStatRec(m.stats)
	}
	return m
}
|
||||
|
||||
func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
|
||||
if collectStats {
|
||||
pcm.stats.Gets++
|
||||
}
|
||||
// Do we have a map stored by k1?
|
||||
//
|
||||
m2, present := pcm.store.Get(k1)
|
||||
if present {
|
||||
if collectStats {
|
||||
pcm.stats.GetHits++
|
||||
}
|
||||
// We found a map of values corresponding to k1, so now we need to look up k2 in that map
|
||||
//
|
||||
return m2.Get(k2)
|
||||
}
|
||||
if collectStats {
|
||||
pcm.stats.GetMisses++
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Put stores v against the ordered pair (k1, k2), creating the second-level
// map for k1 on first use. The pair counter only grows when a genuinely new
// (k1, k2) pair is inserted; overwriting an existing pair leaves it alone.
func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {

	if collectStats {
		pcm.stats.Puts++
	}
	// First, does a second-level map already exist for k1?
	//
	if m2, present := pcm.store.Get(k1); present {
		if collectStats {
			pcm.stats.PutHits++
		}
		// present is reused here: true means k2 was already in the inner
		// map, in which case the overall pair count does not change.
		_, present = m2.Put(k2, v)
		if !present {
			pcm.size++
			if collectStats {
				pcm.stats.CurSize = pcm.size
				if pcm.size > pcm.stats.MaxSize {
					pcm.stats.MaxSize = pcm.size
				}
			}
		}
	} else {
		// No map found for k1, so we create one, add our value to it, then
		// store the new inner map against k1.
		//
		if collectStats {
			pcm.stats.PutMisses++
			m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
		} else {
			m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
		}

		m2.Put(k2, v)
		pcm.store.Put(k1, m2)
		pcm.size++
	}
}
|
||||
|
||||
// JPCMap2 is an alternative pair map that flattens both keys into a single
// combined hash (see dHash) and chains colliding entries in a slice per slot,
// avoiding the nested maps used by JPCMap.
type JPCMap2 struct {
	store map[int][]JPCEntry // combined hash -> entries sharing that hash
	size  int                // number of (k1, k2) pairs stored
	stats *JStatRec          // collection statistics; only populated when collectStats is enabled
}
|
||||
|
||||
// JPCEntry holds one (k1, k2) -> v association within a JPCMap2 hash slot.
// Both keys are kept so that hash collisions can be resolved by Equals.
type JPCEntry struct {
	k1, k2, v *PredictionContext
}
|
||||
|
||||
// NewJPCMap2 creates an empty flattened pair map, pre-sizing the backing map
// for roughly 1000 slots. When statistics collection is compiled in, a
// JStatRec tagged with the given source and description is registered globally.
func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
	m := &JPCMap2{
		store: make(map[int][]JPCEntry, 1000),
	}
	if collectStats {
		m.stats = &JStatRec{
			Source:      cType,
			Description: desc,
		}
		// Track where we created it from if we are being asked to do so
		if runtimeConfig.statsTraceStacks {
			m.stats.CreateStack = debug.Stack()
		}
		Statistics.AddJStatRec(m.stats)
	}
	return m
}
|
||||
|
||||
// dHash combines the cached hashes of an ordered context pair into a single
// slot key for JPCMap2. The pair is ordered: dHash(a, b) generally differs
// from dHash(b, a).
func dHash(k1, k2 *PredictionContext) int {
	return k1.cachedHash*31 + k2.cachedHash
}
|
||||
|
||||
func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
|
||||
if collectStats {
|
||||
pcm.stats.Gets++
|
||||
}
|
||||
|
||||
h := dHash(k1, k2)
|
||||
var hClash bool
|
||||
for _, e := range pcm.store[h] {
|
||||
hClash = true
|
||||
if e.k1.Equals(k1) && e.k2.Equals(k2) {
|
||||
if collectStats {
|
||||
pcm.stats.GetHits++
|
||||
pcm.stats.GetHashConflicts++
|
||||
}
|
||||
return e.v, true
|
||||
}
|
||||
if collectStats {
|
||||
pcm.stats.GetMisses++
|
||||
}
|
||||
}
|
||||
if collectStats {
|
||||
if hClash {
|
||||
pcm.stats.GetHashConflicts++
|
||||
}
|
||||
pcm.stats.GetNoEnt++
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Put stores v against the ordered pair (k1, k2). If an equal pair is already
// present, the map is unchanged and the existing value is returned with true;
// otherwise the new entry is appended to its hash slot and (nil, false) is
// returned.
func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
	if collectStats {
		pcm.stats.Puts++
	}
	h := dHash(k1, k2)
	// hClash records that the hash slot was non-empty, i.e. at least one
	// other pair shares this combined hash.
	var hClash bool
	for _, e := range pcm.store[h] {
		hClash = true
		if e.k1.Equals(k1) && e.k2.Equals(k2) {
			if collectStats {
				pcm.stats.PutHits++
				pcm.stats.PutHashConflicts++
			}
			return e.v, true
		}
		if collectStats {
			// Counted once per non-matching entry scanned in this slot.
			pcm.stats.PutMisses++
		}
	}
	if collectStats {
		if hClash {
			pcm.stats.PutHashConflicts++
		}
	}
	pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
	pcm.size++
	if collectStats {
		pcm.stats.CurSize = pcm.size
		if pcm.size > pcm.stats.MaxSize {
			pcm.stats.MaxSize = pcm.size
		}
	}
	return nil, false
}
|
||||
|
||||
// VisitEntry pairs a visited PredictionContext key with its computed value.
// NOTE(review): no use of this type is visible in this chunk — confirm it is
// still referenced elsewhere before relying on it.
type VisitEntry struct {
	k *PredictionContext
	v *PredictionContext
}
|
||||
// VisitRecord tracks PredictionContexts already visited during a traversal.
// The store is keyed by pointer identity (see NewVisitRecord), emulating
// Java's IdentityHashMap.
type VisitRecord struct {
	store map[*PredictionContext]*PredictionContext
	len   int       // number of Put calls since acquisition (used for stats)
	stats *JStatRec // collection statistics; only populated when collectStats is enabled
}
|
||||
|
||||
// VisitList is a mutex-guarded free list of VisitRecord instances, used to
// recycle records between traversals instead of reallocating them.
type VisitList struct {
	cache *list.List   // pooled *VisitRecord values
	lock  sync.RWMutex // guards cache
}
|
||||
|
||||
// visitListPool is the package-wide pool of reusable VisitRecord instances,
// fed by VisitRecord.Release and drained by NewVisitRecord.
var visitListPool = VisitList{
	cache: list.New(),
	lock:  sync.RWMutex{},
}
|
||||
|
||||
// NewVisitRecord returns a VisitRecord instance from the pool if one is
// available, otherwise a freshly allocated one.
//
// Note that this "map" uses a pointer as a key because we are emulating the
// behavior of IdentityHashMap in Java, which uses the `==` operator to compare
// whether the keys are equal, which means is the key the same reference to an
// object rather than is it .equals() to another object.
func NewVisitRecord() *VisitRecord {
	visitListPool.lock.Lock()
	el := visitListPool.cache.Front()
	defer visitListPool.lock.Unlock()
	var vr *VisitRecord
	if el == nil {
		// Pool is empty: build a brand-new record.
		vr = &VisitRecord{
			store: make(map[*PredictionContext]*PredictionContext),
		}
		if collectStats {
			vr.stats = &JStatRec{
				Source:      PredictionContextCacheCollection,
				Description: "VisitRecord",
			}
			// Track where we created it from if we are being asked to do so
			if runtimeConfig.statsTraceStacks {
				vr.stats.CreateStack = debug.Stack()
			}
		}
	} else {
		// Reuse a pooled record; its stats record (if any) survives between
		// uses, but the store is always replaced with a fresh map.
		vr = el.Value.(*VisitRecord)
		visitListPool.cache.Remove(el)
		vr.store = make(map[*PredictionContext]*PredictionContext)
	}
	if collectStats {
		// NOTE(review): this registers vr.stats on every acquisition, so a
		// pooled record's stats record is re-added each time it is reused —
		// confirm Statistics.AddJStatRec tolerates duplicates.
		Statistics.AddJStatRec(vr.stats)
	}
	return vr
}
|
||||
|
||||
// Release returns the record to the shared pool for reuse. The store is
// dropped (a fresh map is created on the next acquisition), and all counters
// in the stats record are zeroed in place — the pointer itself is kept, since
// the global Statistics registry may still hold it.
func (vr *VisitRecord) Release() {
	vr.len = 0
	vr.store = nil
	if collectStats {
		vr.stats.MaxSize = 0
		vr.stats.CurSize = 0
		vr.stats.Gets = 0
		vr.stats.GetHits = 0
		vr.stats.GetMisses = 0
		vr.stats.GetHashConflicts = 0
		vr.stats.GetNoEnt = 0
		vr.stats.Puts = 0
		vr.stats.PutHits = 0
		vr.stats.PutMisses = 0
		vr.stats.PutHashConflicts = 0
		vr.stats.MaxSlotSize = 0
	}
	visitListPool.lock.Lock()
	visitListPool.cache.PushBack(vr)
	visitListPool.lock.Unlock()
}
|
||||
|
||||
func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
|
||||
if collectStats {
|
||||
vr.stats.Gets++
|
||||
}
|
||||
v := vr.store[k]
|
||||
if v != nil {
|
||||
if collectStats {
|
||||
vr.stats.GetHits++
|
||||
}
|
||||
return v, true
|
||||
}
|
||||
if collectStats {
|
||||
vr.stats.GetNoEnt++
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
|
||||
if collectStats {
|
||||
vr.stats.Puts++
|
||||
}
|
||||
vr.store[k] = v
|
||||
vr.len++
|
||||
if collectStats {
|
||||
vr.stats.CurSize = vr.len
|
||||
if vr.len > vr.stats.MaxSize {
|
||||
vr.stats.MaxSize = vr.len
|
||||
}
|
||||
}
|
||||
return v, false
|
||||
}
|
@@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer {
|
||||
// create a single token. NextToken will return l object after
|
||||
// Matching lexer rule(s). If you subclass to allow multiple token
|
||||
// emissions, then set l to the last token to be Matched or
|
||||
// something nonnil so that the auto token emit mechanism will not
|
||||
// something non nil so that the auto token emit mechanism will not
|
||||
// emit another token.
|
||||
lexer.token = nil
|
||||
|
||||
@@ -111,6 +111,7 @@ const (
|
||||
LexerSkip = -3
|
||||
)
|
||||
|
||||
//goland:noinspection GoUnusedConst
|
||||
const (
|
||||
LexerDefaultTokenChannel = TokenDefaultChannel
|
||||
LexerHidden = TokenHiddenChannel
|
||||
@@ -118,7 +119,7 @@ const (
|
||||
LexerMaxCharValue = 0x10FFFF
|
||||
)
|
||||
|
||||
func (b *BaseLexer) reset() {
|
||||
func (b *BaseLexer) Reset() {
|
||||
// wack Lexer state variables
|
||||
if b.input != nil {
|
||||
b.input.Seek(0) // rewind the input
|
||||
@@ -176,7 +177,7 @@ func (b *BaseLexer) safeMatch() (ret int) {
|
||||
return b.Interpreter.Match(b.input, b.mode)
|
||||
}
|
||||
|
||||
// Return a token from l source i.e., Match a token on the char stream.
|
||||
// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream.
|
||||
func (b *BaseLexer) NextToken() Token {
|
||||
if b.input == nil {
|
||||
panic("NextToken requires a non-nil input stream.")
|
||||
@@ -205,9 +206,8 @@ func (b *BaseLexer) NextToken() Token {
|
||||
continueOuter := false
|
||||
for {
|
||||
b.thetype = TokenInvalidType
|
||||
ttype := LexerSkip
|
||||
|
||||
ttype = b.safeMatch()
|
||||
ttype := b.safeMatch()
|
||||
|
||||
if b.input.LA(1) == TokenEOF {
|
||||
b.hitEOF = true
|
||||
@@ -234,12 +234,11 @@ func (b *BaseLexer) NextToken() Token {
|
||||
}
|
||||
}
|
||||
|
||||
// Instruct the lexer to Skip creating a token for current lexer rule
|
||||
// and look for another token. NextToken() knows to keep looking when
|
||||
// a lexer rule finishes with token set to SKIPTOKEN. Recall that
|
||||
// Skip instructs the lexer to Skip creating a token for current lexer rule
|
||||
// and look for another token. [NextToken] knows to keep looking when
|
||||
// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
|
||||
// if token==nil at end of any token rule, it creates one for you
|
||||
// and emits it.
|
||||
// /
|
||||
func (b *BaseLexer) Skip() {
|
||||
b.thetype = LexerSkip
|
||||
}
|
||||
@@ -248,23 +247,29 @@ func (b *BaseLexer) More() {
|
||||
b.thetype = LexerMore
|
||||
}
|
||||
|
||||
// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode
|
||||
// will be in force.
|
||||
func (b *BaseLexer) SetMode(m int) {
|
||||
b.mode = m
|
||||
}
|
||||
|
||||
// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the
|
||||
// current lexer mode to the supplied mode m.
|
||||
func (b *BaseLexer) PushMode(m int) {
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("pushMode " + strconv.Itoa(m))
|
||||
}
|
||||
b.modeStack.Push(b.mode)
|
||||
b.mode = m
|
||||
}
|
||||
|
||||
// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
|
||||
// return to.
|
||||
func (b *BaseLexer) PopMode() int {
|
||||
if len(b.modeStack) == 0 {
|
||||
panic("Empty Stack")
|
||||
}
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
|
||||
}
|
||||
i, _ := b.modeStack.Pop()
|
||||
@@ -280,7 +285,7 @@ func (b *BaseLexer) inputStream() CharStream {
|
||||
func (b *BaseLexer) SetInputStream(input CharStream) {
|
||||
b.input = nil
|
||||
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
|
||||
b.reset()
|
||||
b.Reset()
|
||||
b.input = input
|
||||
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
|
||||
}
|
||||
@@ -289,20 +294,19 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
|
||||
return b.tokenFactorySourcePair
|
||||
}
|
||||
|
||||
// By default does not support multiple emits per NextToken invocation
|
||||
// for efficiency reasons. Subclass and override l method, NextToken,
|
||||
// and GetToken (to push tokens into a list and pull from that list
|
||||
// rather than a single variable as l implementation does).
|
||||
// /
|
||||
// EmitToken by default does not support multiple emits per [NextToken] invocation
|
||||
// for efficiency reasons. Subclass and override this func, [NextToken],
|
||||
// and [GetToken] (to push tokens into a list and pull from that list
|
||||
// rather than a single variable as this implementation does).
|
||||
func (b *BaseLexer) EmitToken(token Token) {
|
||||
b.token = token
|
||||
}
|
||||
|
||||
// The standard method called to automatically emit a token at the
|
||||
// Emit is the standard method called to automatically emit a token at the
|
||||
// outermost lexical rule. The token object should point into the
|
||||
// char buffer start..stop. If there is a text override in 'text',
|
||||
// use that to set the token's text. Override l method to emit
|
||||
// custom Token objects or provide a Newfactory.
|
||||
// use that to set the token's text. Override this method to emit
|
||||
// custom [Token] objects or provide a new factory.
|
||||
// /
|
||||
func (b *BaseLexer) Emit() Token {
|
||||
t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
|
||||
@@ -310,6 +314,7 @@ func (b *BaseLexer) Emit() Token {
|
||||
return t
|
||||
}
|
||||
|
||||
// EmitEOF emits an EOF token. By default, this is the last token emitted
|
||||
func (b *BaseLexer) EmitEOF() Token {
|
||||
cpos := b.GetCharPositionInLine()
|
||||
lpos := b.GetLine()
|
||||
@@ -318,6 +323,7 @@ func (b *BaseLexer) EmitEOF() Token {
|
||||
return eof
|
||||
}
|
||||
|
||||
// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
|
||||
func (b *BaseLexer) GetCharPositionInLine() int {
|
||||
return b.Interpreter.GetCharPositionInLine()
|
||||
}
|
||||
@@ -334,13 +340,12 @@ func (b *BaseLexer) SetType(t int) {
|
||||
b.thetype = t
|
||||
}
|
||||
|
||||
// What is the index of the current character of lookahead?///
|
||||
// GetCharIndex returns the index of the current character of lookahead
|
||||
func (b *BaseLexer) GetCharIndex() int {
|
||||
return b.input.Index()
|
||||
}
|
||||
|
||||
// Return the text Matched so far for the current token or any text override.
|
||||
// Set the complete text of l token it wipes any previous changes to the text.
|
||||
// GetText returns the text Matched so far for the current token or any text override.
|
||||
func (b *BaseLexer) GetText() string {
|
||||
if b.text != "" {
|
||||
return b.text
|
||||
@@ -349,17 +354,20 @@ func (b *BaseLexer) GetText() string {
|
||||
return b.Interpreter.GetText(b.input)
|
||||
}
|
||||
|
||||
// SetText sets the complete text of this token; it wipes any previous changes to the text.
|
||||
func (b *BaseLexer) SetText(text string) {
|
||||
b.text = text
|
||||
}
|
||||
|
||||
// GetATN returns the ATN used by the lexer.
|
||||
func (b *BaseLexer) GetATN() *ATN {
|
||||
return b.Interpreter.ATN()
|
||||
}
|
||||
|
||||
// Return a list of all Token objects in input char stream.
|
||||
// Forces load of all tokens. Does not include EOF token.
|
||||
// /
|
||||
// GetAllTokens returns a list of all [Token] objects in input char stream.
|
||||
// Forces a load of all tokens that can be made from the input char stream.
|
||||
//
|
||||
// Does not include EOF token.
|
||||
func (b *BaseLexer) GetAllTokens() []Token {
|
||||
vl := b.Virt
|
||||
tokens := make([]Token, 0)
|
||||
@@ -398,11 +406,13 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string {
|
||||
return "'" + b.getErrorDisplayForChar(c) + "'"
|
||||
}
|
||||
|
||||
// Lexers can normally Match any char in it's vocabulary after Matching
|
||||
// a token, so do the easy thing and just kill a character and hope
|
||||
// Recover can normally Match any char in its vocabulary after Matching
|
||||
// a token, so here we do the easy thing and just kill a character and hope
|
||||
// it all works out. You can instead use the rule invocation stack
|
||||
// to do sophisticated error recovery if you are in a fragment rule.
|
||||
// /
|
||||
//
|
||||
// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
|
||||
// a character that makes no sense to the recognizer.
|
||||
func (b *BaseLexer) Recover(re RecognitionException) {
|
||||
if b.input.LA(1) != TokenEOF {
|
||||
if _, ok := re.(*LexerNoViableAltException); ok {
|
@@ -7,14 +7,29 @@ package antlr
|
||||
import "strconv"
|
||||
|
||||
const (
|
||||
LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
|
||||
LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
|
||||
LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
|
||||
LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
|
||||
LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
|
||||
LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
|
||||
LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
|
||||
LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
|
||||
// LexerActionTypeChannel represents a [LexerChannelAction] action.
|
||||
LexerActionTypeChannel = 0
|
||||
|
||||
// LexerActionTypeCustom represents a [LexerCustomAction] action.
|
||||
LexerActionTypeCustom = 1
|
||||
|
||||
// LexerActionTypeMode represents a [LexerModeAction] action.
|
||||
LexerActionTypeMode = 2
|
||||
|
||||
// LexerActionTypeMore represents a [LexerMoreAction] action.
|
||||
LexerActionTypeMore = 3
|
||||
|
||||
// LexerActionTypePopMode represents a [LexerPopModeAction] action.
|
||||
LexerActionTypePopMode = 4
|
||||
|
||||
// LexerActionTypePushMode represents a [LexerPushModeAction] action.
|
||||
LexerActionTypePushMode = 5
|
||||
|
||||
// LexerActionTypeSkip represents a [LexerSkipAction] action.
|
||||
LexerActionTypeSkip = 6
|
||||
|
||||
// LexerActionTypeType represents a [LexerTypeAction] action.
|
||||
LexerActionTypeType = 7
|
||||
)
|
||||
|
||||
type LexerAction interface {
|
||||
@@ -39,7 +54,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction {
|
||||
return la
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) execute(lexer Lexer) {
|
||||
func (b *BaseLexerAction) execute(_ Lexer) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
@@ -52,17 +67,19 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) Hash() int {
|
||||
return b.actionType
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, b.actionType)
|
||||
return murmurFinish(h, 1)
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) Equals(other LexerAction) bool {
|
||||
return b == other
|
||||
return b.actionType == other.getActionType()
|
||||
}
|
||||
|
||||
// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
|
||||
// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
|
||||
//
|
||||
// <p>The {@code Skip} command does not have any parameters, so l action is
|
||||
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
|
||||
// The Skip command does not have any parameters, so this action is
|
||||
// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE].
|
||||
type LexerSkipAction struct {
|
||||
*BaseLexerAction
|
||||
}
|
||||
@@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction {
|
||||
return la
|
||||
}
|
||||
|
||||
// Provides a singleton instance of l parameterless lexer action.
|
||||
// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
|
||||
var LexerSkipActionINSTANCE = NewLexerSkipAction()
|
||||
|
||||
func (l *LexerSkipAction) execute(lexer Lexer) {
|
||||
lexer.Skip()
|
||||
}
|
||||
|
||||
// String returns a string representation of the current [LexerSkipAction].
|
||||
func (l *LexerSkipAction) String() string {
|
||||
return "skip"
|
||||
}
|
||||
|
||||
func (b *LexerSkipAction) Equals(other LexerAction) bool {
|
||||
return other.getActionType() == LexerActionTypeSkip
|
||||
}
|
||||
|
||||
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
|
||||
//
|
||||
// with the assigned type.
|
||||
@@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string {
|
||||
return "actionType(" + strconv.Itoa(l.thetype) + ")"
|
||||
}
|
||||
|
||||
// Implements the {@code pushMode} lexer action by calling
|
||||
// {@link Lexer//pushMode} with the assigned mode.
|
||||
// LexerPushModeAction implements the pushMode lexer action by calling
|
||||
// [Lexer.pushMode] with the assigned mode.
|
||||
type LexerPushModeAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
mode int
|
||||
}
|
||||
|
||||
@@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string {
|
||||
return "pushMode(" + strconv.Itoa(l.mode) + ")"
|
||||
}
|
||||
|
||||
// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
|
||||
// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
|
||||
//
|
||||
// <p>The {@code popMode} command does not have any parameters, so l action is
|
||||
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
|
||||
// The popMode command does not have any parameters, so this action is
|
||||
// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE]
|
||||
type LexerPopModeAction struct {
|
||||
*BaseLexerAction
|
||||
}
|
||||
@@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string {
|
||||
return "more"
|
||||
}
|
||||
|
||||
// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
|
||||
// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
|
||||
// the assigned mode.
|
||||
type LexerModeAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
mode int
|
||||
}
|
||||
|
||||
@@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool {
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the {@code channel} lexer action by calling
|
||||
// {@link Lexer//setChannel} with the assigned channel.
|
||||
// Constructs a New{@code channel} action with the specified channel value.
|
||||
// @param channel The channel value to pass to {@link Lexer//setChannel}.
|
||||
// LexerChannelAction implements the channel lexer action by calling
|
||||
// [Lexer.setChannel] with the assigned channel.
|
||||
//
|
||||
// Constructs a new channel action with the specified channel value.
|
||||
type LexerChannelAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
channel int
|
||||
}
|
||||
|
||||
// NewLexerChannelAction creates a channel lexer action by calling
|
||||
// [Lexer.setChannel] with the assigned channel.
|
||||
//
|
||||
// Constructs a new channel action with the specified channel value.
|
||||
func NewLexerChannelAction(channel int) *LexerChannelAction {
|
||||
l := new(LexerChannelAction)
|
||||
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
|
||||
@@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string {
|
||||
// lexer actions, see {@link LexerActionExecutor//append} and
|
||||
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.</p>
|
||||
|
||||
// Constructs a Newindexed custom action by associating a character offset
|
||||
// with a {@link LexerAction}.
|
||||
//
|
||||
// <p>Note: This class is only required for lexer actions for which
|
||||
// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
|
||||
//
|
||||
// @param offset The offset into the input {@link CharStream}, relative to
|
||||
// the token start index, at which the specified lexer action should be
|
||||
// executed.
|
||||
// @param action The lexer action to execute at a particular offset in the
|
||||
// input {@link CharStream}.
|
||||
type LexerIndexedCustomAction struct {
|
||||
*BaseLexerAction
|
||||
|
||||
offset int
|
||||
lexerAction LexerAction
|
||||
isPositionDependent bool
|
||||
}
|
||||
|
||||
// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
|
||||
// with a [LexerAction].
|
||||
//
|
||||
// Note: This class is only required for lexer actions for which
|
||||
// [LexerAction.isPositionDependent] returns true.
|
||||
//
|
||||
// The offset points into the input [CharStream], relative to
|
||||
// the token start index, at which the specified lexerAction should be
|
||||
// executed.
|
||||
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
|
||||
|
||||
l := new(LexerIndexedCustomAction)
|
@@ -29,28 +29,20 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
|
||||
l.lexerActions = lexerActions
|
||||
|
||||
// Caches the result of {@link //hashCode} since the hash code is an element
|
||||
// of the performance-critical {@link LexerATNConfig//hashCode} operation.
|
||||
l.cachedHash = murmurInit(57)
|
||||
// of the performance-critical {@link ATNConfig//hashCode} operation.
|
||||
l.cachedHash = murmurInit(0)
|
||||
for _, a := range lexerActions {
|
||||
l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
|
||||
}
|
||||
l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// Creates a {@link LexerActionExecutor} which executes the actions for
|
||||
// the input {@code lexerActionExecutor} followed by a specified
|
||||
// {@code lexerAction}.
|
||||
//
|
||||
// @param lexerActionExecutor The executor for actions already traversed by
|
||||
// the lexer while Matching a token within a particular
|
||||
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
|
||||
// though it were an empty executor.
|
||||
// @param lexerAction The lexer action to execute after the actions
|
||||
// specified in {@code lexerActionExecutor}.
|
||||
//
|
||||
// @return A {@link LexerActionExecutor} for executing the combine actions
|
||||
// of {@code lexerActionExecutor} and {@code lexerAction}.
|
||||
// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
|
||||
// the input [LexerActionExecutor] followed by a specified
|
||||
// [LexerAction].
|
||||
// TODO: This does not match the Java code
|
||||
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
|
||||
if lexerActionExecutor == nil {
|
||||
return NewLexerActionExecutor([]LexerAction{lexerAction})
|
||||
@@ -59,47 +51,42 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc
|
||||
return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
|
||||
}
|
||||
|
||||
// Creates a {@link LexerActionExecutor} which encodes the current offset
|
||||
// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
|
||||
// for position-dependent lexer actions.
|
||||
//
|
||||
// <p>Normally, when the executor encounters lexer actions where
|
||||
// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
|
||||
// {@link IntStream//seek} on the input {@link CharStream} to set the input
|
||||
// position to the <em>end</em> of the current token. This behavior provides
|
||||
// for efficient DFA representation of lexer actions which appear at the end
|
||||
// Normally, when the executor encounters lexer actions where
|
||||
// [LexerAction.isPositionDependent] returns true, it calls
|
||||
// [IntStream.Seek] on the input [CharStream] to set the input
|
||||
// position to the end of the current token. This behavior provides
|
||||
// for efficient [DFA] representation of lexer actions which appear at the end
|
||||
// of a lexer rule, even when the lexer rule Matches a variable number of
|
||||
// characters.</p>
|
||||
// characters.
|
||||
//
|
||||
// <p>Prior to traversing a Match transition in the ATN, the current offset
|
||||
// Prior to traversing a Match transition in the [ATN], the current offset
|
||||
// from the token start index is assigned to all position-dependent lexer
|
||||
// actions which have not already been assigned a fixed offset. By storing
|
||||
// the offsets relative to the token start index, the DFA representation of
|
||||
// the offsets relative to the token start index, the [DFA] representation of
|
||||
// lexer actions which appear in the middle of tokens remains efficient due
|
||||
// to sharing among tokens of the same length, regardless of their absolute
|
||||
// position in the input stream.</p>
|
||||
// to sharing among tokens of the same Length, regardless of their absolute
|
||||
// position in the input stream.
|
||||
//
|
||||
// <p>If the current executor already has offsets assigned to all
|
||||
// position-dependent lexer actions, the method returns {@code this}.</p>
|
||||
// If the current executor already has offsets assigned to all
|
||||
// position-dependent lexer actions, the method returns this instance.
|
||||
//
|
||||
// @param offset The current offset to assign to all position-dependent
|
||||
// The offset is assigned to all position-dependent
|
||||
// lexer actions which do not already have offsets assigned.
|
||||
//
|
||||
// @return A {@link LexerActionExecutor} which stores input stream offsets
|
||||
// The func returns a [LexerActionExecutor] that stores input stream offsets
|
||||
// for all position-dependent lexer actions.
|
||||
// /
|
||||
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
|
||||
var updatedLexerActions []LexerAction
|
||||
for i := 0; i < len(l.lexerActions); i++ {
|
||||
_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
|
||||
if l.lexerActions[i].getIsPositionDependent() && !ok {
|
||||
if updatedLexerActions == nil {
|
||||
updatedLexerActions = make([]LexerAction, 0)
|
||||
|
||||
for _, a := range l.lexerActions {
|
||||
updatedLexerActions = append(updatedLexerActions, a)
|
||||
updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
|
||||
updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
|
||||
}
|
||||
}
|
||||
|
||||
updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
|
||||
}
|
||||
}
|
@@ -10,10 +10,8 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
//goland:noinspection GoUnusedGlobalVariable
|
||||
var (
|
||||
LexerATNSimulatorDebug = false
|
||||
LexerATNSimulatorDFADebug = false
|
||||
|
||||
LexerATNSimulatorMinDFAEdge = 0
|
||||
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
|
||||
|
||||
@@ -32,11 +30,11 @@ type ILexerATNSimulator interface {
|
||||
}
|
||||
|
||||
type LexerATNSimulator struct {
|
||||
*BaseATNSimulator
|
||||
BaseATNSimulator
|
||||
|
||||
recog Lexer
|
||||
predictionMode int
|
||||
mergeCache DoubleDict
|
||||
mergeCache *JPCMap2
|
||||
startIndex int
|
||||
Line int
|
||||
CharPositionInLine int
|
||||
@@ -46,27 +44,35 @@ type LexerATNSimulator struct {
|
||||
}
|
||||
|
||||
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
|
||||
l := new(LexerATNSimulator)
|
||||
|
||||
l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
|
||||
l := &LexerATNSimulator{
|
||||
BaseATNSimulator: BaseATNSimulator{
|
||||
atn: atn,
|
||||
sharedContextCache: sharedContextCache,
|
||||
},
|
||||
}
|
||||
|
||||
l.decisionToDFA = decisionToDFA
|
||||
l.recog = recog
|
||||
|
||||
// The current token's starting index into the character stream.
|
||||
// Shared across DFA to ATN simulation in case the ATN fails and the
|
||||
// DFA did not have a previous accept state. In l case, we use the
|
||||
// ATN-generated exception object.
|
||||
l.startIndex = -1
|
||||
// line number 1..n within the input///
|
||||
|
||||
// line number 1..n within the input
|
||||
l.Line = 1
|
||||
|
||||
// The index of the character relative to the beginning of the line
|
||||
// 0..n-1///
|
||||
// 0..n-1
|
||||
l.CharPositionInLine = 0
|
||||
|
||||
l.mode = LexerDefaultMode
|
||||
|
||||
// Used during DFA/ATN exec to record the most recent accept configuration
|
||||
// info
|
||||
l.prevAccept = NewSimState()
|
||||
// done
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
@@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() {
|
||||
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
|
||||
startState := l.atn.modeToStartState[l.mode]
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
|
||||
}
|
||||
oldMode := l.mode
|
||||
@@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
|
||||
|
||||
predict := l.execATN(input, next)
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
|
||||
}
|
||||
return predict
|
||||
@@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
|
||||
|
||||
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("start state closure=" + ds0.configs.String())
|
||||
}
|
||||
if ds0.isAcceptState {
|
||||
// allow zero-length tokens
|
||||
// allow zero-Length tokens
|
||||
l.captureSimState(l.prevAccept, input, ds0)
|
||||
}
|
||||
t := input.LA(1)
|
||||
s := ds0 // s is current/from DFA state
|
||||
|
||||
for { // while more work
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("execATN loop starting closure: " + s.configs.String())
|
||||
}
|
||||
|
||||
@@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
|
||||
}
|
||||
}
|
||||
t = input.LA(1)
|
||||
s = target // flip current DFA target becomes Newsrc/from state
|
||||
s = target // flip current DFA target becomes new src/from state
|
||||
}
|
||||
|
||||
return l.failOrAccept(l.prevAccept, input, s.configs, t)
|
||||
@@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState
|
||||
return nil
|
||||
}
|
||||
target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
|
||||
if LexerATNSimulatorDebug && target != nil {
|
||||
if runtimeConfig.lexerATNSimulatorDebug && target != nil {
|
||||
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
|
||||
}
|
||||
return target
|
||||
}
|
||||
|
||||
// Compute a target state for an edge in the DFA, and attempt to add the
|
||||
// computed state and corresponding edge to the DFA.
|
||||
// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the
|
||||
// computed state and corresponding edge to the [DFA].
|
||||
//
|
||||
// @param input The input stream
|
||||
// @param s The current DFA state
|
||||
// @param t The next input symbol
|
||||
//
|
||||
// @return The computed target DFA state for the given input symbol
|
||||
// {@code t}. If {@code t} does not lead to a valid DFA state, l method
|
||||
// returns {@link //ERROR}.
|
||||
// The func returns the computed target [DFA] state for the given input symbol t.
|
||||
// If this does not lead to a valid [DFA] state, this method
|
||||
// returns ATNSimulatorError.
|
||||
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
|
||||
reach := NewOrderedATNConfigSet()
|
||||
|
||||
// if we don't find an existing DFA state
|
||||
// Fill reach starting from closure, following t transitions
|
||||
l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
|
||||
l.getReachableConfigSet(input, s.configs, reach, t)
|
||||
|
||||
if len(reach.configs) == 0 { // we got nowhere on t from s
|
||||
if !reach.hasSemanticContext {
|
||||
// we got nowhere on t, don't panic out l knowledge it'd
|
||||
// cause a failover from DFA later.
|
||||
// cause a fail-over from DFA later.
|
||||
l.addDFAEdge(s, t, ATNSimulatorError, nil)
|
||||
}
|
||||
// stop when we can't Match any more char
|
||||
return ATNSimulatorError
|
||||
}
|
||||
// Add an edge from s to target DFA found/created for reach
|
||||
return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
|
||||
return l.addDFAEdge(s, t, nil, reach)
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
|
||||
func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
|
||||
if l.prevAccept.dfaState != nil {
|
||||
lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
|
||||
l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
|
||||
@@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream,
|
||||
panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
|
||||
}
|
||||
|
||||
// Given a starting configuration set, figure out all ATN configurations
|
||||
// we can reach upon input {@code t}. Parameter {@code reach} is a return
|
||||
// parameter.
|
||||
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
|
||||
// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations
|
||||
// we can reach upon input t.
|
||||
//
|
||||
// Parameter reach is a return parameter.
|
||||
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
|
||||
// l is used to Skip processing for configs which have a lower priority
|
||||
// than a config that already reached an accept state for the same rule
|
||||
// than a runtimeConfig that already reached an accept state for the same rule
|
||||
SkipAlt := ATNInvalidAltNumber
|
||||
|
||||
for _, cfg := range closure.GetItems() {
|
||||
currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
|
||||
if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
|
||||
for _, cfg := range closure.configs {
|
||||
currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
|
||||
if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
|
||||
continue
|
||||
}
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
|
||||
fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
|
||||
fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
|
||||
}
|
||||
|
||||
for _, trans := range cfg.GetState().GetTransitions() {
|
||||
target := l.getReachableTarget(trans, t)
|
||||
if target != nil {
|
||||
lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
|
||||
lexerActionExecutor := cfg.lexerActionExecutor
|
||||
if lexerActionExecutor != nil {
|
||||
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
|
||||
}
|
||||
treatEOFAsEpsilon := (t == TokenEOF)
|
||||
config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
|
||||
treatEOFAsEpsilon := t == TokenEOF
|
||||
config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
|
||||
if l.closure(input, config, reach,
|
||||
currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
|
||||
// any remaining configs for l alt have a lower priority
|
||||
@@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Printf("ACTION %v\n", lexerActionExecutor)
|
||||
}
|
||||
// seek to after last char in token
|
||||
@@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
|
||||
func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
|
||||
configs := NewOrderedATNConfigSet()
|
||||
for i := 0; i < len(p.GetTransitions()); i++ {
|
||||
target := p.GetTransitions()[i].getTarget()
|
||||
@@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord
|
||||
return configs
|
||||
}
|
||||
|
||||
// Since the alternatives within any lexer decision are ordered by
|
||||
// preference, l method stops pursuing the closure as soon as an accept
|
||||
// closure since the alternatives within any lexer decision are ordered by
|
||||
// preference, this method stops pursuing the closure as soon as an accept
|
||||
// state is reached. After the first accept state is reached by depth-first
|
||||
// search from {@code config}, all other (potentially reachable) states for
|
||||
// l rule would have a lower priority.
|
||||
// search from runtimeConfig, all other (potentially reachable) states for
|
||||
// this rule would have a lower priority.
|
||||
//
|
||||
// @return {@code true} if an accept state is reached, otherwise
|
||||
// {@code false}.
|
||||
func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
|
||||
// The func returns true if an accept state is reached.
|
||||
func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
|
||||
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("closure(" + config.String() + ")")
|
||||
}
|
||||
|
||||
_, ok := config.state.(*RuleStopState)
|
||||
if ok {
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
if l.recog != nil {
|
||||
fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
|
||||
} else {
|
||||
@@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co
|
||||
}
|
||||
|
||||
// side-effect: can alter configs.hasSemanticContext
|
||||
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
|
||||
configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
|
||||
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
|
||||
configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {
|
||||
|
||||
var cfg *LexerATNConfig
|
||||
var cfg *ATNConfig
|
||||
|
||||
if trans.getSerializationType() == TransitionRULE {
|
||||
|
||||
@@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
|
||||
|
||||
pt := trans.(*PredicateTransition)
|
||||
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
|
||||
}
|
||||
configs.SetHasSemanticContext(true)
|
||||
configs.hasSemanticContext = true
|
||||
if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
|
||||
cfg = NewLexerATNConfig4(config, trans.getTarget())
|
||||
}
|
||||
@@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
|
||||
// TODO: if the entry rule is invoked recursively, some
|
||||
// actions may be executed during the recursive call. The
|
||||
// problem can appear when hasEmptyPath() is true but
|
||||
// isEmpty() is false. In l case, the config needs to be
|
||||
// isEmpty() is false. In this case, the config needs to be
|
||||
// split into two contexts - one with just the empty path
|
||||
// and another with everything but the empty path.
|
||||
// Unfortunately, the current algorithm does not allow
|
||||
@@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
|
||||
return cfg
|
||||
}
|
||||
|
||||
// Evaluate a predicate specified in the lexer.
|
||||
// evaluatePredicate eEvaluates a predicate specified in the lexer.
|
||||
//
|
||||
// <p>If {@code speculative} is {@code true}, l method was called before
|
||||
// {@link //consume} for the Matched character. This method should call
|
||||
// {@link //consume} before evaluating the predicate to ensure position
|
||||
// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
|
||||
// and {@link Lexer//getcolumn}, properly reflect the current
|
||||
// lexer state. This method should restore {@code input} and the simulator
|
||||
// to the original state before returning (i.e. undo the actions made by the
|
||||
// call to {@link //consume}.</p>
|
||||
// If speculative is true, this method was called before
|
||||
// [consume] for the Matched character. This method should call
|
||||
// [consume] before evaluating the predicate to ensure position
|
||||
// sensitive values, including [GetText], [GetLine],
|
||||
// and [GetColumn], properly reflect the current
|
||||
// lexer state. This method should restore input and the simulator
|
||||
// to the original state before returning, i.e. undo the actions made by the
|
||||
// call to [Consume].
|
||||
//
|
||||
// @param input The input stream.
|
||||
// @param ruleIndex The rule containing the predicate.
|
||||
// @param predIndex The index of the predicate within the rule.
|
||||
// @param speculative {@code true} if the current index in {@code input} is
|
||||
// one character before the predicate's location.
|
||||
//
|
||||
// @return {@code true} if the specified predicate evaluates to
|
||||
// {@code true}.
|
||||
// /
|
||||
// The func returns true if the specified predicate evaluates to true.
|
||||
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
|
||||
// assume true if no recognizer was provided
|
||||
if l.recog == nil {
|
||||
@@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream
|
||||
settings.dfaState = dfaState
|
||||
}
|
||||
|
||||
func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
|
||||
func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
|
||||
if to == nil && cfgs != nil {
|
||||
// leading to l call, ATNConfigSet.hasSemanticContext is used as a
|
||||
// marker indicating dynamic predicate evaluation makes l edge
|
||||
@@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
|
||||
// TJP notes: next time through the DFA, we see a pred again and eval.
|
||||
// If that gets us to a previously created (but dangling) DFA
|
||||
// state, we can continue in pure DFA mode from there.
|
||||
// /
|
||||
suppressEdge := cfgs.HasSemanticContext()
|
||||
cfgs.SetHasSemanticContext(false)
|
||||
|
||||
//
|
||||
suppressEdge := cfgs.hasSemanticContext
|
||||
cfgs.hasSemanticContext = false
|
||||
to = l.addDFAState(cfgs, true)
|
||||
|
||||
if suppressEdge {
|
||||
@@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
|
||||
// Only track edges within the DFA bounds
|
||||
return to
|
||||
}
|
||||
if LexerATNSimulatorDebug {
|
||||
if runtimeConfig.lexerATNSimulatorDebug {
|
||||
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
|
||||
}
|
||||
l.atn.edgeMu.Lock()
|
||||
@@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
|
||||
// configurations already. This method also detects the first
|
||||
// configuration containing an ATN rule stop state. Later, when
|
||||
// traversing the DFA, we will know which rule to accept.
|
||||
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
|
||||
func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {
|
||||
|
||||
proposed := NewDFAState(-1, configs)
|
||||
var firstConfigWithRuleStopState ATNConfig
|
||||
|
||||
for _, cfg := range configs.GetItems() {
|
||||
var firstConfigWithRuleStopState *ATNConfig
|
||||
|
||||
for _, cfg := range configs.configs {
|
||||
_, ok := cfg.GetState().(*RuleStopState)
|
||||
|
||||
if ok {
|
||||
@@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
|
||||
}
|
||||
if firstConfigWithRuleStopState != nil {
|
||||
proposed.isAcceptState = true
|
||||
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
|
||||
proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
|
||||
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
|
||||
}
|
||||
dfa := l.decisionToDFA[l.mode]
|
||||
|
||||
l.atn.stateMu.Lock()
|
||||
defer l.atn.stateMu.Unlock()
|
||||
existing, present := dfa.states.Get(proposed)
|
||||
existing, present := dfa.Get(proposed)
|
||||
if present {
|
||||
|
||||
// This state was already present, so just return it.
|
||||
@@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
|
||||
|
||||
// We need to add the new state
|
||||
//
|
||||
proposed.stateNumber = dfa.states.Len()
|
||||
configs.SetReadOnly(true)
|
||||
proposed.stateNumber = dfa.Len()
|
||||
configs.readOnly = true
|
||||
configs.configLookup = nil // Not needed now
|
||||
proposed.configs = configs
|
||||
dfa.states.Put(proposed)
|
||||
dfa.Put(proposed)
|
||||
}
|
||||
if !suppressEdge {
|
||||
dfa.setS0(proposed)
|
||||
@@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA {
|
||||
return l.decisionToDFA[mode]
|
||||
}
|
||||
|
||||
// Get the text Matched so far for the current token.
|
||||
// GetText returns the text [Match]ed so far for the current token.
|
||||
func (l *LexerATNSimulator) GetText(input CharStream) string {
|
||||
// index is first lookahead char, don't include.
|
||||
return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
|
@@ -14,11 +14,11 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
|
||||
return la
|
||||
}
|
||||
|
||||
// - Special value added to the lookahead sets to indicate that we hit
|
||||
// a predicate during analysis if {@code seeThruPreds==false}.
|
||||
//
|
||||
// /
|
||||
const (
|
||||
// LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
|
||||
// a predicate during analysis if
|
||||
//
|
||||
// seeThruPreds==false
|
||||
LL1AnalyzerHitPred = TokenInvalidType
|
||||
)
|
||||
|
||||
@@ -38,11 +38,12 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
|
||||
count := len(s.GetTransitions())
|
||||
look := make([]*IntervalSet, count)
|
||||
for alt := 0; alt < count; alt++ {
|
||||
|
||||
look[alt] = NewIntervalSet()
|
||||
lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
|
||||
seeThruPreds := false // fail to get lookahead upon pred
|
||||
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
|
||||
// Wipe out lookahead for la alternative if we found nothing
|
||||
lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
|
||||
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
|
||||
|
||||
// Wipe out lookahead for la alternative if we found nothing,
|
||||
// or we had a predicate when we !seeThruPreds
|
||||
if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
|
||||
look[alt] = nil
|
||||
@@ -51,32 +52,31 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
|
||||
return look
|
||||
}
|
||||
|
||||
// *
|
||||
// Compute set of tokens that can follow {@code s} in the ATN in the
|
||||
// specified {@code ctx}.
|
||||
// Look computes the set of tokens that can follow s in the [ATN] in the
|
||||
// specified ctx.
|
||||
//
|
||||
// <p>If {@code ctx} is {@code nil} and the end of the rule containing
|
||||
// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
|
||||
// If {@code ctx} is not {@code nil} and the end of the outermost rule is
|
||||
// reached, {@link Token//EOF} is added to the result set.</p>
|
||||
// If ctx is nil and the end of the rule containing
|
||||
// s is reached, [EPSILON] is added to the result set.
|
||||
//
|
||||
// @param s the ATN state
|
||||
// @param stopState the ATN state to stop at. This can be a
|
||||
// {@link BlockEndState} to detect epsilon paths through a closure.
|
||||
// @param ctx the complete parser context, or {@code nil} if the context
|
||||
// If ctx is not nil and the end of the outermost rule is
|
||||
// reached, [EOF] is added to the result set.
|
||||
//
|
||||
// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a
|
||||
// [BlockEndState] to detect epsilon paths through a closure.
|
||||
//
|
||||
// Parameter ctx is the complete parser context, or nil if the context
|
||||
// should be ignored
|
||||
//
|
||||
// @return The set of tokens that can follow {@code s} in the ATN in the
|
||||
// specified {@code ctx}.
|
||||
// /
|
||||
// The func returns the set of tokens that can follow s in the [ATN] in the
|
||||
// specified ctx.
|
||||
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
|
||||
r := NewIntervalSet()
|
||||
seeThruPreds := true // ignore preds get all lookahead
|
||||
var lookContext PredictionContext
|
||||
var lookContext *PredictionContext
|
||||
if ctx != nil {
|
||||
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
|
||||
}
|
||||
la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
|
||||
la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
|
||||
NewBitSet(), true, true)
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -110,16 +110,17 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
|
||||
// outermost context is reached. This parameter has no effect if {@code ctx}
|
||||
// is {@code nil}.
|
||||
|
||||
func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
|
||||
func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
|
||||
calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
|
||||
|
||||
returnState := la.atn.states[ctx.getReturnState(i)]
|
||||
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
|
||||
|
||||
}
|
||||
|
||||
func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
|
||||
func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
|
||||
|
||||
c := NewBaseATNConfig6(s, 0, ctx)
|
||||
c := NewATNConfig6(s, 0, ctx)
|
||||
|
||||
if lookBusy.Contains(c) {
|
||||
return
|
||||
@@ -151,7 +152,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
|
||||
return
|
||||
}
|
||||
|
||||
if ctx != BasePredictionContextEMPTY {
|
||||
if ctx.pcType != PredictionContextEmpty {
|
||||
removed := calledRuleStack.contains(s.GetRuleIndex())
|
||||
defer func() {
|
||||
if removed {
|
||||
@@ -202,7 +203,8 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
|
||||
}
|
||||
}
|
||||
|
||||
func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
|
||||
func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
|
||||
calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
|
||||
|
||||
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
|
||||
|
47
vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
generated
vendored
Normal file
47
vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
//go:build !antlr.stats
|
||||
|
||||
package antlr
|
||||
|
||||
// This file is compiled when the build configuration antlr.stats is not enabled.
|
||||
// which then allows the compiler to optimize out all the code that is not used.
|
||||
const collectStats = false
|
||||
|
||||
// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
|
||||
type goRunStats struct {
|
||||
}
|
||||
|
||||
var Statistics = &goRunStats{}
|
||||
|
||||
func (s *goRunStats) AddJStatRec(_ *JStatRec) {
|
||||
// Do nothing - compiler will optimize this out (hopefully)
|
||||
}
|
||||
|
||||
func (s *goRunStats) CollectionAnomalies() {
|
||||
// Do nothing - compiler will optimize this out (hopefully)
|
||||
}
|
||||
|
||||
func (s *goRunStats) Reset() {
|
||||
// Do nothing - compiler will optimize this out (hopefully)
|
||||
}
|
||||
|
||||
func (s *goRunStats) Report(dir string, prefix string) error {
|
||||
// Do nothing - compiler will optimize this out (hopefully)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *goRunStats) Analyze() {
|
||||
// Do nothing - compiler will optimize this out (hopefully)
|
||||
}
|
||||
|
||||
type statsOption func(*goRunStats) error
|
||||
|
||||
func (s *goRunStats) Configure(options ...statsOption) error {
|
||||
// Do nothing - compiler will optimize this out (hopefully)
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithTopN(topN int) statsOption {
|
||||
return func(s *goRunStats) error {
|
||||
return nil
|
||||
}
|
||||
}
|
@@ -48,8 +48,10 @@ type BaseParser struct {
|
||||
_SyntaxErrors int
|
||||
}
|
||||
|
||||
// p.is all the parsing support code essentially most of it is error
|
||||
// recovery stuff.//
|
||||
// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error
|
||||
// recovery stuff.
|
||||
//
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func NewBaseParser(input TokenStream) *BaseParser {
|
||||
|
||||
p := new(BaseParser)
|
||||
@@ -58,39 +60,46 @@ func NewBaseParser(input TokenStream) *BaseParser {
|
||||
|
||||
// The input stream.
|
||||
p.input = nil
|
||||
|
||||
// The error handling strategy for the parser. The default value is a new
|
||||
// instance of {@link DefaultErrorStrategy}.
|
||||
p.errHandler = NewDefaultErrorStrategy()
|
||||
p.precedenceStack = make([]int, 0)
|
||||
p.precedenceStack.Push(0)
|
||||
// The {@link ParserRuleContext} object for the currently executing rule.
|
||||
|
||||
// The ParserRuleContext object for the currently executing rule.
|
||||
// p.is always non-nil during the parsing process.
|
||||
p.ctx = nil
|
||||
// Specifies whether or not the parser should construct a parse tree during
|
||||
|
||||
// Specifies whether the parser should construct a parse tree during
|
||||
// the parsing process. The default value is {@code true}.
|
||||
p.BuildParseTrees = true
|
||||
// When {@link //setTrace}{@code (true)} is called, a reference to the
|
||||
// {@link TraceListener} is stored here so it can be easily removed in a
|
||||
// later call to {@link //setTrace}{@code (false)}. The listener itself is
|
||||
|
||||
// When setTrace(true) is called, a reference to the
|
||||
// TraceListener is stored here, so it can be easily removed in a
|
||||
// later call to setTrace(false). The listener itself is
|
||||
// implemented as a parser listener so p.field is not directly used by
|
||||
// other parser methods.
|
||||
p.tracer = nil
|
||||
// The list of {@link ParseTreeListener} listeners registered to receive
|
||||
|
||||
// The list of ParseTreeListener listeners registered to receive
|
||||
// events during the parse.
|
||||
p.parseListeners = nil
|
||||
|
||||
// The number of syntax errors Reported during parsing. p.value is
|
||||
// incremented each time {@link //NotifyErrorListeners} is called.
|
||||
// incremented each time NotifyErrorListeners is called.
|
||||
p._SyntaxErrors = 0
|
||||
p.SetInputStream(input)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// p.field maps from the serialized ATN string to the deserialized {@link
|
||||
// ATN} with
|
||||
// This field maps from the serialized ATN string to the deserialized [ATN] with
|
||||
// bypass alternatives.
|
||||
//
|
||||
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
|
||||
// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
|
||||
//
|
||||
//goland:noinspection GoUnusedGlobalVariable
|
||||
var bypassAltsAtnCache = make(map[string]int)
|
||||
|
||||
// reset the parser's state//
|
||||
@@ -143,10 +152,13 @@ func (p *BaseParser) Match(ttype int) Token {
|
||||
p.Consume()
|
||||
} else {
|
||||
t = p.errHandler.RecoverInline(p)
|
||||
if p.HasError() {
|
||||
return nil
|
||||
}
|
||||
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
|
||||
// we must have conjured up a Newtoken during single token
|
||||
// insertion
|
||||
// if it's not the current symbol
|
||||
|
||||
// we must have conjured up a new token during single token
|
||||
// insertion if it's not the current symbol
|
||||
p.ctx.AddErrorNode(t)
|
||||
}
|
||||
}
|
||||
@@ -178,9 +190,8 @@ func (p *BaseParser) MatchWildcard() Token {
|
||||
} else {
|
||||
t = p.errHandler.RecoverInline(p)
|
||||
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
|
||||
// we must have conjured up a Newtoken during single token
|
||||
// insertion
|
||||
// if it's not the current symbol
|
||||
// we must have conjured up a new token during single token
|
||||
// insertion if it's not the current symbol
|
||||
p.ctx.AddErrorNode(t)
|
||||
}
|
||||
}
|
||||
@@ -202,33 +213,27 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener {
|
||||
return p.parseListeners
|
||||
}
|
||||
|
||||
// Registers {@code listener} to receive events during the parsing process.
|
||||
// AddParseListener registers listener to receive events during the parsing process.
|
||||
//
|
||||
// <p>To support output-preserving grammar transformations (including but not
|
||||
// To support output-preserving grammar transformations (including but not
|
||||
// limited to left-recursion removal, automated left-factoring, and
|
||||
// optimized code generation), calls to listener methods during the parse
|
||||
// may differ substantially from calls made by
|
||||
// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
|
||||
// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
|
||||
// particular, rule entry and exit events may occur in a different order
|
||||
// during the parse than after the parser. In addition, calls to certain
|
||||
// rule entry methods may be omitted.</p>
|
||||
// rule entry methods may be omitted.
|
||||
//
|
||||
// <p>With the following specific exceptions, calls to listener events are
|
||||
// <em>deterministic</em>, i.e. for identical input the calls to listener
|
||||
// methods will be the same.</p>
|
||||
// With the following specific exceptions, calls to listener events are
|
||||
// deterministic, i.e. for identical input the calls to listener
|
||||
// methods will be the same.
|
||||
//
|
||||
// <ul>
|
||||
// <li>Alterations to the grammar used to generate code may change the
|
||||
// behavior of the listener calls.</li>
|
||||
// <li>Alterations to the command line options passed to ANTLR 4 when
|
||||
// generating the parser may change the behavior of the listener calls.</li>
|
||||
// <li>Changing the version of the ANTLR Tool used to generate the parser
|
||||
// may change the behavior of the listener calls.</li>
|
||||
// </ul>
|
||||
//
|
||||
// @param listener the listener to add
|
||||
//
|
||||
// @panics nilPointerException if {@code} listener is {@code nil}
|
||||
// - Alterations to the grammar used to generate code may change the
|
||||
// behavior of the listener calls.
|
||||
// - Alterations to the command line options passed to ANTLR 4 when
|
||||
// generating the parser may change the behavior of the listener calls.
|
||||
// - Changing the version of the ANTLR Tool used to generate the parser
|
||||
// may change the behavior of the listener calls.
|
||||
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
|
||||
if listener == nil {
|
||||
panic("listener")
|
||||
@@ -239,11 +244,10 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
|
||||
p.parseListeners = append(p.parseListeners, listener)
|
||||
}
|
||||
|
||||
// Remove {@code listener} from the list of parse listeners.
|
||||
// RemoveParseListener removes listener from the list of parse listeners.
|
||||
//
|
||||
// <p>If {@code listener} is {@code nil} or has not been added as a parse
|
||||
// listener, p.method does nothing.</p>
|
||||
// @param listener the listener to remove
|
||||
// If listener is nil or has not been added as a parse
|
||||
// listener, this func does nothing.
|
||||
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
|
||||
|
||||
if p.parseListeners != nil {
|
||||
@@ -274,7 +278,7 @@ func (p *BaseParser) removeParseListeners() {
|
||||
p.parseListeners = nil
|
||||
}
|
||||
|
||||
// Notify any parse listeners of an enter rule event.
|
||||
// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
|
||||
func (p *BaseParser) TriggerEnterRuleEvent() {
|
||||
if p.parseListeners != nil {
|
||||
ctx := p.ctx
|
||||
@@ -285,9 +289,7 @@ func (p *BaseParser) TriggerEnterRuleEvent() {
|
||||
}
|
||||
}
|
||||
|
||||
// Notify any parse listeners of an exit rule event.
|
||||
//
|
||||
// @see //addParseListener
|
||||
// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
|
||||
func (p *BaseParser) TriggerExitRuleEvent() {
|
||||
if p.parseListeners != nil {
|
||||
// reverse order walk of listeners
|
||||
@@ -314,19 +316,16 @@ func (p *BaseParser) GetTokenFactory() TokenFactory {
|
||||
return p.input.GetTokenSource().GetTokenFactory()
|
||||
}
|
||||
|
||||
// Tell our token source and error strategy about a Newway to create tokens.//
|
||||
// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
|
||||
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
|
||||
p.input.GetTokenSource().setTokenFactory(factory)
|
||||
}
|
||||
|
||||
// The ATN with bypass alternatives is expensive to create so we create it
|
||||
// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
|
||||
// lazily.
|
||||
//
|
||||
// @panics UnsupportedOperationException if the current parser does not
|
||||
// implement the {@link //getSerializedATN()} method.
|
||||
func (p *BaseParser) GetATNWithBypassAlts() {
|
||||
|
||||
// TODO
|
||||
// TODO - Implement this?
|
||||
panic("Not implemented!")
|
||||
|
||||
// serializedAtn := p.getSerializedATN()
|
||||
@@ -354,6 +353,7 @@ func (p *BaseParser) GetATNWithBypassAlts() {
|
||||
// String id = m.Get("ID")
|
||||
// </pre>
|
||||
|
||||
//goland:noinspection GoUnusedParameter
|
||||
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
|
||||
|
||||
panic("NewParseTreePatternMatcher not implemented!")
|
||||
@@ -386,14 +386,16 @@ func (p *BaseParser) GetTokenStream() TokenStream {
|
||||
return p.input
|
||||
}
|
||||
|
||||
// Set the token stream and reset the parser.//
|
||||
// SetTokenStream installs input as the token stream and resets the parser.
|
||||
func (p *BaseParser) SetTokenStream(input TokenStream) {
|
||||
p.input = nil
|
||||
p.reset()
|
||||
p.input = input
|
||||
}
|
||||
|
||||
// Match needs to return the current input symbol, which gets put
|
||||
// GetCurrentToken returns the current token at LT(1).
|
||||
//
|
||||
// [Match] needs to return the current input symbol, which gets put
|
||||
// into the label for the associated token ref e.g., x=ID.
|
||||
func (p *BaseParser) GetCurrentToken() Token {
|
||||
return p.input.LT(1)
|
||||
@@ -446,7 +448,7 @@ func (p *BaseParser) addContextToParseTree() {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
|
||||
func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
|
||||
p.SetState(state)
|
||||
p.ctx = localctx
|
||||
p.ctx.SetStart(p.input.LT(1))
|
||||
@@ -474,7 +476,7 @@ func (p *BaseParser) ExitRule() {
|
||||
|
||||
func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
|
||||
localctx.SetAltNumber(altNum)
|
||||
// if we have Newlocalctx, make sure we replace existing ctx
|
||||
// if we have a new localctx, make sure we replace existing ctx
|
||||
// that is previous child of parse tree
|
||||
if p.BuildParseTrees && p.ctx != localctx {
|
||||
if p.ctx.GetParent() != nil {
|
||||
@@ -498,7 +500,7 @@ func (p *BaseParser) GetPrecedence() int {
|
||||
return p.precedenceStack[len(p.precedenceStack)-1]
|
||||
}
|
||||
|
||||
func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
|
||||
func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
|
||||
p.SetState(state)
|
||||
p.precedenceStack.Push(precedence)
|
||||
p.ctx = localctx
|
||||
@@ -512,7 +514,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleI
|
||||
//
|
||||
// Like {@link //EnterRule} but for recursive rules.
|
||||
|
||||
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
|
||||
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
|
||||
previous := p.ctx
|
||||
previous.SetParent(localctx)
|
||||
previous.SetInvokingState(state)
|
||||
@@ -530,7 +532,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state,
|
||||
}
|
||||
|
||||
func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
|
||||
p.precedenceStack.Pop()
|
||||
_, _ = p.precedenceStack.Pop()
|
||||
p.ctx.SetStop(p.input.LT(-1))
|
||||
retCtx := p.ctx // save current ctx (return value)
|
||||
// unroll so ctx is as it was before call to recursive method
|
||||
@@ -561,29 +563,22 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
|
||||
func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
|
||||
return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedParameter
|
||||
func (p *BaseParser) inContext(context ParserRuleContext) bool {
|
||||
// TODO: useful in parser?
|
||||
return false
|
||||
}
|
||||
|
||||
//
|
||||
// Checks whether or not {@code symbol} can follow the current state in the
|
||||
// ATN. The behavior of p.method is equivalent to the following, but is
|
||||
// IsExpectedToken checks whether symbol can follow the current state in the
|
||||
// {ATN}. The behavior of p.method is equivalent to the following, but is
|
||||
// implemented such that the complete context-sensitive follow set does not
|
||||
// need to be explicitly constructed.
|
||||
//
|
||||
// <pre>
|
||||
// return getExpectedTokens().contains(symbol)
|
||||
// </pre>
|
||||
//
|
||||
// @param symbol the symbol type to check
|
||||
// @return {@code true} if {@code symbol} can follow the current state in
|
||||
// the ATN, otherwise {@code false}.
|
||||
|
||||
func (p *BaseParser) IsExpectedToken(symbol int) bool {
|
||||
atn := p.Interpreter.atn
|
||||
ctx := p.ctx
|
||||
@@ -611,11 +606,9 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Computes the set of input symbols which could follow the current parser
|
||||
// state and context, as given by {@link //GetState} and {@link //GetContext},
|
||||
// GetExpectedTokens and returns the set of input symbols which could follow the current parser
|
||||
// state and context, as given by [GetState] and [GetContext],
|
||||
// respectively.
|
||||
//
|
||||
// @see ATN//getExpectedTokens(int, RuleContext)
|
||||
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
|
||||
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
|
||||
}
|
||||
@@ -626,7 +619,7 @@ func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
|
||||
return atn.NextTokens(s, nil)
|
||||
}
|
||||
|
||||
// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
|
||||
// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found.
|
||||
func (p *BaseParser) GetRuleIndex(ruleName string) int {
|
||||
var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
|
||||
if ok {
|
||||
@@ -636,13 +629,10 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Return List<String> of the rule names in your parser instance
|
||||
// GetRuleInvocationStack returns a list of the rule names in your parser instance
|
||||
// leading up to a call to the current rule. You could override if
|
||||
// you want more details such as the file/line info of where
|
||||
// in the ATN a rule is invoked.
|
||||
//
|
||||
// this very useful for error messages.
|
||||
|
||||
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
|
||||
if c == nil {
|
||||
c = p.ctx
|
||||
@@ -668,16 +658,16 @@ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
|
||||
return stack
|
||||
}
|
||||
|
||||
// For debugging and other purposes.//
|
||||
// GetDFAStrings returns a list of all DFA states used for debugging purposes
|
||||
func (p *BaseParser) GetDFAStrings() string {
|
||||
return fmt.Sprint(p.Interpreter.decisionToDFA)
|
||||
}
|
||||
|
||||
// For debugging and other purposes.//
|
||||
// DumpDFA prints the whole of the DFA for debugging
|
||||
func (p *BaseParser) DumpDFA() {
|
||||
seenOne := false
|
||||
for _, dfa := range p.Interpreter.decisionToDFA {
|
||||
if dfa.states.Len() > 0 {
|
||||
if dfa.Len() > 0 {
|
||||
if seenOne {
|
||||
fmt.Println()
|
||||
}
|
||||
@@ -692,8 +682,10 @@ func (p *BaseParser) GetSourceName() string {
|
||||
return p.GrammarFileName
|
||||
}
|
||||
|
||||
// During a parse is sometimes useful to listen in on the rule entry and exit
|
||||
// events as well as token Matches. p.is for quick and dirty debugging.
|
||||
// SetTrace installs a trace listener for the parse.
|
||||
//
|
||||
// During a parse it is sometimes useful to listen in on the rule entry and exit
|
||||
// events as well as token Matches. This is for quick and dirty debugging.
|
||||
func (p *BaseParser) SetTrace(trace *TraceListener) {
|
||||
if trace == nil {
|
||||
p.RemoveParseListener(p.tracer)
|
File diff suppressed because it is too large
Load Diff
@@ -31,7 +31,9 @@ type ParserRuleContext interface {
|
||||
}
|
||||
|
||||
type BaseParserRuleContext struct {
|
||||
*BaseRuleContext
|
||||
parentCtx RuleContext
|
||||
invokingState int
|
||||
RuleIndex int
|
||||
|
||||
start, stop Token
|
||||
exception RecognitionException
|
||||
@@ -40,8 +42,22 @@ type BaseParserRuleContext struct {
|
||||
|
||||
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
|
||||
prc := new(BaseParserRuleContext)
|
||||
InitBaseParserRuleContext(prc, parent, invokingStateNumber)
|
||||
return prc
|
||||
}
|
||||
|
||||
prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
|
||||
func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
|
||||
// What context invoked b rule?
|
||||
prc.parentCtx = parent
|
||||
|
||||
// What state invoked the rule associated with b context?
|
||||
// The "return address" is the followState of invokingState
|
||||
// If parent is nil, b should be -1.
|
||||
if parent == nil {
|
||||
prc.invokingState = -1
|
||||
} else {
|
||||
prc.invokingState = invokingStateNumber
|
||||
}
|
||||
|
||||
prc.RuleIndex = -1
|
||||
// * If we are debugging or building a parse tree for a Visitor,
|
||||
@@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int)
|
||||
// The exception that forced prc rule to return. If the rule successfully
|
||||
// completed, prc is {@code nil}.
|
||||
prc.exception = nil
|
||||
|
||||
return prc
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
|
||||
@@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// Double dispatch methods for listeners
|
||||
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
|
||||
// EnterRule is called when any rule is entered.
|
||||
func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
|
||||
// ExitRule is called when any rule is exited.
|
||||
func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
|
||||
}
|
||||
|
||||
// * Does not set parent link other add methods do that///
|
||||
// * Does not set parent link other add methods do that
|
||||
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
|
||||
if prc.children == nil {
|
||||
prc.children = make([]Tree, 0)
|
||||
@@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
|
||||
return child
|
||||
}
|
||||
|
||||
// * Used by EnterOuterAlt to toss out a RuleContext previously added as
|
||||
// we entered a rule. If we have // label, we will need to remove
|
||||
// generic ruleContext object.
|
||||
// /
|
||||
// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
|
||||
// we entered a rule. If we have a label, we will need to remove
|
||||
// the generic ruleContext object.
|
||||
func (prc *BaseParserRuleContext) RemoveLastChild() {
|
||||
if prc.children != nil && len(prc.children) > 0 {
|
||||
prc.children = prc.children[0 : len(prc.children)-1]
|
||||
@@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int {
|
||||
return len(prc.children)
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
|
||||
func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
|
||||
if prc.start == nil || prc.stop == nil {
|
||||
return TreeInvalidInterval
|
||||
}
|
||||
@@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
|
||||
return s
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) SetParent(v Tree) {
|
||||
if v == nil {
|
||||
prc.parentCtx = nil
|
||||
} else {
|
||||
prc.parentCtx = v.(RuleContext)
|
||||
}
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) GetInvokingState() int {
|
||||
return prc.invokingState
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) SetInvokingState(t int) {
|
||||
prc.invokingState = t
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) GetRuleIndex() int {
|
||||
return prc.RuleIndex
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) GetAltNumber() int {
|
||||
return ATNInvalidAltNumber
|
||||
}
|
||||
|
||||
func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
|
||||
|
||||
// IsEmpty returns true if the context of b is empty.
|
||||
//
|
||||
// A context is empty if there is no invoking state, meaning nobody calls
|
||||
// current context.
|
||||
func (prc *BaseParserRuleContext) IsEmpty() bool {
|
||||
return prc.invokingState == -1
|
||||
}
|
||||
|
||||
// GetParent returns the combined text of all child nodes. This method only considers
|
||||
// tokens which have been added to the parse tree.
|
||||
//
|
||||
// Since tokens on hidden channels (e.g. whitespace or comments) are not
|
||||
// added to the parse trees, they will not appear in the output of this
|
||||
// method.
|
||||
func (prc *BaseParserRuleContext) GetParent() Tree {
|
||||
return prc.parentCtx
|
||||
}
|
||||
|
||||
var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
|
||||
|
||||
type InterpreterRuleContext interface {
|
||||
@@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct {
|
||||
*BaseParserRuleContext
|
||||
}
|
||||
|
||||
//goland:noinspection GoUnusedExportedFunction
|
||||
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
|
||||
|
||||
prc := new(BaseInterpreterRuleContext)
|
727
vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
generated
vendored
Normal file
727
vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
generated
vendored
Normal file
@@ -0,0 +1,727 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"golang.org/x/exp/slices"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var _emptyPredictionContextHash int
|
||||
|
||||
func init() {
|
||||
_emptyPredictionContextHash = murmurInit(1)
|
||||
_emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
|
||||
}
|
||||
|
||||
func calculateEmptyHash() int {
|
||||
return _emptyPredictionContextHash
|
||||
}
|
||||
|
||||
const (
|
||||
// BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $
|
||||
// doesn't mean wildcard:
|
||||
//
|
||||
// $ + x = [$,x]
|
||||
//
|
||||
// Here,
|
||||
//
|
||||
// $ = EmptyReturnState
|
||||
BasePredictionContextEmptyReturnState = 0x7FFFFFFF
|
||||
)
|
||||
|
||||
// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
|
||||
//
|
||||
//goland:noinspection GoUnusedGlobalVariable
|
||||
var (
|
||||
BasePredictionContextglobalNodeCount = 1
|
||||
BasePredictionContextid = BasePredictionContextglobalNodeCount
|
||||
)
|
||||
|
||||
const (
|
||||
PredictionContextEmpty = iota
|
||||
PredictionContextSingleton
|
||||
PredictionContextArray
|
||||
)
|
||||
|
||||
// PredictionContext is a go idiomatic implementation of PredictionContext that does not rty to
|
||||
// emulate inheritance from Java, and can be used without an interface definition. An interface
|
||||
// is not required because no user code will ever need to implement this interface.
|
||||
type PredictionContext struct {
|
||||
cachedHash int
|
||||
pcType int
|
||||
parentCtx *PredictionContext
|
||||
returnState int
|
||||
parents []*PredictionContext
|
||||
returnStates []int
|
||||
}
|
||||
|
||||
func NewEmptyPredictionContext() *PredictionContext {
|
||||
nep := &PredictionContext{}
|
||||
nep.cachedHash = calculateEmptyHash()
|
||||
nep.pcType = PredictionContextEmpty
|
||||
nep.returnState = BasePredictionContextEmptyReturnState
|
||||
return nep
|
||||
}
|
||||
|
||||
func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
|
||||
pc := &PredictionContext{}
|
||||
pc.pcType = PredictionContextSingleton
|
||||
pc.returnState = returnState
|
||||
pc.parentCtx = parent
|
||||
if parent != nil {
|
||||
pc.cachedHash = calculateHash(parent, returnState)
|
||||
} else {
|
||||
pc.cachedHash = calculateEmptyHash()
|
||||
}
|
||||
return pc
|
||||
}
|
||||
|
||||
func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
|
||||
if returnState == BasePredictionContextEmptyReturnState && parent == nil {
|
||||
// someone can pass in the bits of an array ctx that mean $
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
return NewBaseSingletonPredictionContext(parent, returnState)
|
||||
}
|
||||
|
||||
func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
|
||||
// Parent can be nil only if full ctx mode and we make an array
|
||||
// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
|
||||
// nil parent and
|
||||
// returnState == {@link //EmptyReturnState}.
|
||||
hash := murmurInit(1)
|
||||
for _, parent := range parents {
|
||||
hash = murmurUpdate(hash, parent.Hash())
|
||||
}
|
||||
for _, returnState := range returnStates {
|
||||
hash = murmurUpdate(hash, returnState)
|
||||
}
|
||||
hash = murmurFinish(hash, len(parents)<<1)
|
||||
|
||||
nec := &PredictionContext{}
|
||||
nec.cachedHash = hash
|
||||
nec.pcType = PredictionContextArray
|
||||
nec.parents = parents
|
||||
nec.returnStates = returnStates
|
||||
return nec
|
||||
}
|
||||
|
||||
func (p *PredictionContext) Hash() int {
|
||||
return p.cachedHash
|
||||
}
|
||||
|
||||
func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
|
||||
switch p.pcType {
|
||||
case PredictionContextEmpty:
|
||||
otherP := other.(*PredictionContext)
|
||||
return other == nil || otherP == nil || otherP.isEmpty()
|
||||
case PredictionContextSingleton:
|
||||
return p.SingletonEquals(other)
|
||||
case PredictionContextArray:
|
||||
return p.ArrayEquals(other)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
|
||||
if o == nil {
|
||||
return false
|
||||
}
|
||||
other := o.(*PredictionContext)
|
||||
if other == nil || other.pcType != PredictionContextArray {
|
||||
return false
|
||||
}
|
||||
if p.cachedHash != other.Hash() {
|
||||
return false // can't be same if hash is different
|
||||
}
|
||||
|
||||
// Must compare the actual array elements and not just the array address
|
||||
//
|
||||
return slices.Equal(p.returnStates, other.returnStates) &&
|
||||
slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
|
||||
return x.Equals(y)
|
||||
})
|
||||
}
|
||||
|
||||
func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
|
||||
if other == nil {
|
||||
return false
|
||||
}
|
||||
otherP := other.(*PredictionContext)
|
||||
if otherP == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if p.cachedHash != otherP.Hash() {
|
||||
return false // Can't be same if hash is different
|
||||
}
|
||||
|
||||
if p.returnState != otherP.getReturnState(0) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Both parents must be nil if one is
|
||||
if p.parentCtx == nil {
|
||||
return otherP.parentCtx == nil
|
||||
}
|
||||
|
||||
return p.parentCtx.Equals(otherP.parentCtx)
|
||||
}
|
||||
|
||||
func (p *PredictionContext) GetParent(i int) *PredictionContext {
|
||||
switch p.pcType {
|
||||
case PredictionContextEmpty:
|
||||
return nil
|
||||
case PredictionContextSingleton:
|
||||
return p.parentCtx
|
||||
case PredictionContextArray:
|
||||
return p.parents[i]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PredictionContext) getReturnState(i int) int {
|
||||
switch p.pcType {
|
||||
case PredictionContextArray:
|
||||
return p.returnStates[i]
|
||||
default:
|
||||
return p.returnState
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PredictionContext) GetReturnStates() []int {
|
||||
switch p.pcType {
|
||||
case PredictionContextArray:
|
||||
return p.returnStates
|
||||
default:
|
||||
return []int{p.returnState}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PredictionContext) length() int {
|
||||
switch p.pcType {
|
||||
case PredictionContextArray:
|
||||
return len(p.returnStates)
|
||||
default:
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PredictionContext) hasEmptyPath() bool {
|
||||
switch p.pcType {
|
||||
case PredictionContextSingleton:
|
||||
return p.returnState == BasePredictionContextEmptyReturnState
|
||||
}
|
||||
return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
|
||||
}
|
||||
|
||||
func (p *PredictionContext) String() string {
|
||||
switch p.pcType {
|
||||
case PredictionContextEmpty:
|
||||
return "$"
|
||||
case PredictionContextSingleton:
|
||||
var up string
|
||||
|
||||
if p.parentCtx == nil {
|
||||
up = ""
|
||||
} else {
|
||||
up = p.parentCtx.String()
|
||||
}
|
||||
|
||||
if len(up) == 0 {
|
||||
if p.returnState == BasePredictionContextEmptyReturnState {
|
||||
return "$"
|
||||
}
|
||||
|
||||
return strconv.Itoa(p.returnState)
|
||||
}
|
||||
|
||||
return strconv.Itoa(p.returnState) + " " + up
|
||||
case PredictionContextArray:
|
||||
if p.isEmpty() {
|
||||
return "[]"
|
||||
}
|
||||
|
||||
s := "["
|
||||
for i := 0; i < len(p.returnStates); i++ {
|
||||
if i > 0 {
|
||||
s = s + ", "
|
||||
}
|
||||
if p.returnStates[i] == BasePredictionContextEmptyReturnState {
|
||||
s = s + "$"
|
||||
continue
|
||||
}
|
||||
s = s + strconv.Itoa(p.returnStates[i])
|
||||
if !p.parents[i].isEmpty() {
|
||||
s = s + " " + p.parents[i].String()
|
||||
} else {
|
||||
s = s + "nil"
|
||||
}
|
||||
}
|
||||
return s + "]"
|
||||
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PredictionContext) isEmpty() bool {
|
||||
switch p.pcType {
|
||||
case PredictionContextEmpty:
|
||||
return true
|
||||
case PredictionContextArray:
|
||||
// since EmptyReturnState can only appear in the last position, we
|
||||
// don't need to verify that size==1
|
||||
return p.returnStates[0] == BasePredictionContextEmptyReturnState
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PredictionContext) Type() int {
|
||||
return p.pcType
|
||||
}
|
||||
|
||||
func calculateHash(parent *PredictionContext, returnState int) int {
|
||||
h := murmurInit(1)
|
||||
h = murmurUpdate(h, parent.Hash())
|
||||
h = murmurUpdate(h, returnState)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
|
||||
// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
|
||||
// /
|
||||
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
|
||||
if outerContext == nil {
|
||||
outerContext = ParserRuleContextEmpty
|
||||
}
|
||||
// if we are in RuleContext of start rule, s, then BasePredictionContext
|
||||
// is EMPTY. Nobody called us. (if we are empty, return empty)
|
||||
if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
// If we have a parent, convert it to a BasePredictionContext graph
|
||||
parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
|
||||
state := a.states[outerContext.GetInvokingState()]
|
||||
transition := state.GetTransitions()[0]
|
||||
|
||||
return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
|
||||
}
|
||||
|
||||
func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
|
||||
|
||||
// Share same graph if both same
|
||||
//
|
||||
if a == b || a.Equals(b) {
|
||||
return a
|
||||
}
|
||||
|
||||
if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
|
||||
return mergeSingletons(a, b, rootIsWildcard, mergeCache)
|
||||
}
|
||||
// At least one of a or b is array
|
||||
// If one is $ and rootIsWildcard, return $ as wildcard
|
||||
if rootIsWildcard {
|
||||
if a.isEmpty() {
|
||||
return a
|
||||
}
|
||||
if b.isEmpty() {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
// Convert either Singleton or Empty to arrays, so that we can merge them
|
||||
//
|
||||
ara := convertToArray(a)
|
||||
arb := convertToArray(b)
|
||||
return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
|
||||
}
|
||||
|
||||
func convertToArray(pc *PredictionContext) *PredictionContext {
|
||||
switch pc.Type() {
|
||||
case PredictionContextEmpty:
|
||||
return NewArrayPredictionContext([]*PredictionContext{}, []int{})
|
||||
case PredictionContextSingleton:
|
||||
return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
|
||||
default:
|
||||
// Already an array
|
||||
}
|
||||
return pc
|
||||
}
|
||||
|
||||
// mergeSingletons merges two Singleton [PredictionContext] instances.
|
||||
//
|
||||
// Stack tops equal, parents merge is same return left graph.
|
||||
// <embed src="images/SingletonMerge_SameRootSamePar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Same stack top, parents differ merge parents giving array node, then
|
||||
// remainders of those graphs. A new root node is created to point to the
|
||||
// merged parents.<br>
|
||||
// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Different stack tops pointing to same parent. Make array node for the
|
||||
// root where both element in the root point to the same (original)
|
||||
// parent.<br>
|
||||
// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Different stack tops pointing to different parents. Make array node for
|
||||
// the root where each element points to the corresponding original
|
||||
// parent.<br>
|
||||
// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// @param a the first {@link SingletonBasePredictionContext}
|
||||
// @param b the second {@link SingletonBasePredictionContext}
|
||||
// @param rootIsWildcard {@code true} if this is a local-context merge,
|
||||
// otherwise false to indicate a full-context merge
|
||||
// @param mergeCache
|
||||
// /
|
||||
func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
|
||||
if mergeCache != nil {
|
||||
previous, present := mergeCache.Get(a, b)
|
||||
if present {
|
||||
return previous
|
||||
}
|
||||
previous, present = mergeCache.Get(b, a)
|
||||
if present {
|
||||
return previous
|
||||
}
|
||||
}
|
||||
|
||||
rootMerge := mergeRoot(a, b, rootIsWildcard)
|
||||
if rootMerge != nil {
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, rootMerge)
|
||||
}
|
||||
return rootMerge
|
||||
}
|
||||
if a.returnState == b.returnState {
|
||||
parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
|
||||
// if parent is same as existing a or b parent or reduced to a parent,
|
||||
// return it
|
||||
if parent.Equals(a.parentCtx) {
|
||||
return a // ax + bx = ax, if a=b
|
||||
}
|
||||
if parent.Equals(b.parentCtx) {
|
||||
return b // ax + bx = bx, if a=b
|
||||
}
|
||||
// else: ax + ay = a'[x,y]
|
||||
// merge parents x and y, giving array node with x,y then remainders
|
||||
// of those graphs. dup a, a' points at merged array.
|
||||
// New joined parent so create a new singleton pointing to it, a'
|
||||
spc := SingletonBasePredictionContextCreate(parent, a.returnState)
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, spc)
|
||||
}
|
||||
return spc
|
||||
}
|
||||
// a != b payloads differ
|
||||
// see if we can collapse parents due to $+x parents if local ctx
|
||||
var singleParent *PredictionContext
|
||||
if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax +
|
||||
// bx =
|
||||
// [a,b]x
|
||||
singleParent = a.parentCtx
|
||||
}
|
||||
if singleParent != nil { // parents are same
|
||||
// sort payloads and use same parent
|
||||
payloads := []int{a.returnState, b.returnState}
|
||||
if a.returnState > b.returnState {
|
||||
payloads[0] = b.returnState
|
||||
payloads[1] = a.returnState
|
||||
}
|
||||
parents := []*PredictionContext{singleParent, singleParent}
|
||||
apc := NewArrayPredictionContext(parents, payloads)
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, apc)
|
||||
}
|
||||
return apc
|
||||
}
|
||||
// parents differ and can't merge them. Just pack together
|
||||
// into array can't merge.
|
||||
// ax + by = [ax,by]
|
||||
payloads := []int{a.returnState, b.returnState}
|
||||
parents := []*PredictionContext{a.parentCtx, b.parentCtx}
|
||||
if a.returnState > b.returnState { // sort by payload
|
||||
payloads[0] = b.returnState
|
||||
payloads[1] = a.returnState
|
||||
parents = []*PredictionContext{b.parentCtx, a.parentCtx}
|
||||
}
|
||||
apc := NewArrayPredictionContext(parents, payloads)
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, apc)
|
||||
}
|
||||
return apc
|
||||
}
|
||||
|
||||
// Handle case where at least one of {@code a} or {@code b} is
|
||||
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
|
||||
// to represent {@link //EMPTY}.
|
||||
//
|
||||
// <h2>Local-Context Merges</h2>
|
||||
//
|
||||
// <p>These local-context merge operations are used when {@code rootIsWildcard}
|
||||
// is true.</p>
|
||||
//
|
||||
// <p>{@link //EMPTY} is superset of any graph return {@link //EMPTY}.<br>
|
||||
// <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
|
||||
// {@code //EMPTY} return left graph.<br>
|
||||
// <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Special case of last merge if local context.<br>
|
||||
// <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <h2>Full-Context Merges</h2>
|
||||
//
|
||||
// <p>These full-context merge operations are used when {@code rootIsWildcard}
|
||||
// is false.</p>
|
||||
//
|
||||
// <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Must keep all contexts {@link //EMPTY} in array is a special value (and
|
||||
// nil parent).<br>
|
||||
// <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// @param a the first {@link SingletonBasePredictionContext}
|
||||
// @param b the second {@link SingletonBasePredictionContext}
|
||||
// @param rootIsWildcard {@code true} if this is a local-context merge,
|
||||
// otherwise false to indicate a full-context merge
|
||||
// /
|
||||
func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
|
||||
if rootIsWildcard {
|
||||
if a.pcType == PredictionContextEmpty {
|
||||
return BasePredictionContextEMPTY // // + b =//
|
||||
}
|
||||
if b.pcType == PredictionContextEmpty {
|
||||
return BasePredictionContextEMPTY // a +// =//
|
||||
}
|
||||
} else {
|
||||
if a.isEmpty() && b.isEmpty() {
|
||||
return BasePredictionContextEMPTY // $ + $ = $
|
||||
} else if a.isEmpty() { // $ + x = [$,x]
|
||||
payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
|
||||
parents := []*PredictionContext{b.GetParent(-1), nil}
|
||||
return NewArrayPredictionContext(parents, payloads)
|
||||
} else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present)
|
||||
payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
|
||||
parents := []*PredictionContext{a.GetParent(-1), nil}
|
||||
return NewArrayPredictionContext(parents, payloads)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Merge two {@link ArrayBasePredictionContext} instances.
|
||||
//
|
||||
// <p>Different tops, different parents.<br>
|
||||
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Shared top, same parents.<br>
|
||||
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Shared top, different parents.<br>
|
||||
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Shared top, all shared parents.<br>
|
||||
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
|
||||
// type="image/svg+xml"/></p>
|
||||
//
|
||||
// <p>Equal tops, merge parents and reduce top to
|
||||
// {@link SingletonBasePredictionContext}.<br>
|
||||
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
|
||||
//
|
||||
//goland:noinspection GoBoolExpressions
|
||||
func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
|
||||
if mergeCache != nil {
|
||||
previous, present := mergeCache.Get(a, b)
|
||||
if present {
|
||||
if runtimeConfig.parserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
|
||||
}
|
||||
return previous
|
||||
}
|
||||
previous, present = mergeCache.Get(b, a)
|
||||
if present {
|
||||
if runtimeConfig.parserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
|
||||
}
|
||||
return previous
|
||||
}
|
||||
}
|
||||
// merge sorted payloads a + b => M
|
||||
i := 0 // walks a
|
||||
j := 0 // walks b
|
||||
k := 0 // walks target M array
|
||||
|
||||
mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
|
||||
mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
|
||||
// walk and merge to yield mergedParents, mergedReturnStates
|
||||
for i < len(a.returnStates) && j < len(b.returnStates) {
|
||||
aParent := a.parents[i]
|
||||
bParent := b.parents[j]
|
||||
if a.returnStates[i] == b.returnStates[j] {
|
||||
// same payload (stack tops are equal), must yield merged singleton
|
||||
payload := a.returnStates[i]
|
||||
// $+$ = $
|
||||
bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
|
||||
axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax
|
||||
// ->
|
||||
// ax
|
||||
if bothDollars || axAX {
|
||||
mergedParents[k] = aParent // choose left
|
||||
mergedReturnStates[k] = payload
|
||||
} else { // ax+ay -> a'[x,y]
|
||||
mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
|
||||
mergedParents[k] = mergedParent
|
||||
mergedReturnStates[k] = payload
|
||||
}
|
||||
i++ // hop over left one as usual
|
||||
j++ // but also Skip one in right side since we merge
|
||||
} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
|
||||
mergedParents[k] = aParent
|
||||
mergedReturnStates[k] = a.returnStates[i]
|
||||
i++
|
||||
} else { // b > a, copy b[j] to M
|
||||
mergedParents[k] = bParent
|
||||
mergedReturnStates[k] = b.returnStates[j]
|
||||
j++
|
||||
}
|
||||
k++
|
||||
}
|
||||
// copy over any payloads remaining in either array
|
||||
if i < len(a.returnStates) {
|
||||
for p := i; p < len(a.returnStates); p++ {
|
||||
mergedParents[k] = a.parents[p]
|
||||
mergedReturnStates[k] = a.returnStates[p]
|
||||
k++
|
||||
}
|
||||
} else {
|
||||
for p := j; p < len(b.returnStates); p++ {
|
||||
mergedParents[k] = b.parents[p]
|
||||
mergedReturnStates[k] = b.returnStates[p]
|
||||
k++
|
||||
}
|
||||
}
|
||||
// trim merged if we combined a few that had same stack tops
|
||||
if k < len(mergedParents) { // write index < last position trim
|
||||
if k == 1 { // for just one merged element, return singleton top
|
||||
pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, pc)
|
||||
}
|
||||
return pc
|
||||
}
|
||||
mergedParents = mergedParents[0:k]
|
||||
mergedReturnStates = mergedReturnStates[0:k]
|
||||
}
|
||||
|
||||
M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
|
||||
|
||||
// if we created same array as a or b, return that instead
|
||||
// TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
|
||||
if M.Equals(a) {
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, a)
|
||||
}
|
||||
if runtimeConfig.parserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
|
||||
}
|
||||
return a
|
||||
}
|
||||
if M.Equals(b) {
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, b)
|
||||
}
|
||||
if runtimeConfig.parserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
|
||||
}
|
||||
return b
|
||||
}
|
||||
combineCommonParents(&mergedParents)
|
||||
|
||||
if mergeCache != nil {
|
||||
mergeCache.Put(a, b, M)
|
||||
}
|
||||
if runtimeConfig.parserATNSimulatorTraceATNSim {
|
||||
fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
|
||||
}
|
||||
return M
|
||||
}
|
||||
|
||||
// Make pass over all M parents and merge any Equals() ones.
|
||||
// Note that we pass a pointer to the slice as we want to modify it in place.
|
||||
//
|
||||
//goland:noinspection GoUnusedFunction
|
||||
func combineCommonParents(parents *[]*PredictionContext) {
|
||||
uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
|
||||
|
||||
for p := 0; p < len(*parents); p++ {
|
||||
parent := (*parents)[p]
|
||||
_, _ = uniqueParents.Put(parent)
|
||||
}
|
||||
for q := 0; q < len(*parents); q++ {
|
||||
pc, _ := uniqueParents.Get((*parents)[q])
|
||||
(*parents)[q] = pc
|
||||
}
|
||||
}
|
||||
|
||||
func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
|
||||
if context.isEmpty() {
|
||||
return context
|
||||
}
|
||||
existing, present := visited.Get(context)
|
||||
if present {
|
||||
return existing
|
||||
}
|
||||
|
||||
existing, present = contextCache.Get(context)
|
||||
if present {
|
||||
visited.Put(context, existing)
|
||||
return existing
|
||||
}
|
||||
changed := false
|
||||
parents := make([]*PredictionContext, context.length())
|
||||
for i := 0; i < len(parents); i++ {
|
||||
parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
|
||||
if changed || !parent.Equals(context.GetParent(i)) {
|
||||
if !changed {
|
||||
parents = make([]*PredictionContext, context.length())
|
||||
for j := 0; j < context.length(); j++ {
|
||||
parents[j] = context.GetParent(j)
|
||||
}
|
||||
changed = true
|
||||
}
|
||||
parents[i] = parent
|
||||
}
|
||||
}
|
||||
if !changed {
|
||||
contextCache.add(context)
|
||||
visited.Put(context, context)
|
||||
return context
|
||||
}
|
||||
var updated *PredictionContext
|
||||
if len(parents) == 0 {
|
||||
updated = BasePredictionContextEMPTY
|
||||
} else if len(parents) == 1 {
|
||||
updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
|
||||
} else {
|
||||
updated = NewArrayPredictionContext(parents, context.GetReturnStates())
|
||||
}
|
||||
contextCache.add(updated)
|
||||
visited.Put(updated, updated)
|
||||
visited.Put(context, updated)
|
||||
|
||||
return updated
|
||||
}
|
48
vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
generated
vendored
Normal file
48
vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
package antlr
|
||||
|
||||
var BasePredictionContextEMPTY = &PredictionContext{
|
||||
cachedHash: calculateEmptyHash(),
|
||||
pcType: PredictionContextEmpty,
|
||||
returnState: BasePredictionContextEmptyReturnState,
|
||||
}
|
||||
|
||||
// PredictionContextCache is Used to cache [PredictionContext] objects. It is used for the shared
|
||||
// context cash associated with contexts in DFA states. This cache
|
||||
// can be used for both lexers and parsers.
|
||||
type PredictionContextCache struct {
|
||||
cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
|
||||
}
|
||||
|
||||
func NewPredictionContextCache() *PredictionContextCache {
|
||||
return &PredictionContextCache{
|
||||
cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
|
||||
}
|
||||
}
|
||||
|
||||
// Add a context to the cache and return it. If the context already exists,
|
||||
// return that one instead and do not add a new context to the cache.
|
||||
// Protect shared cache from unsafe thread access.
|
||||
func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
|
||||
if ctx.isEmpty() {
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
|
||||
// Put will return the existing entry if it is present (note this is done via Equals, not whether it is
|
||||
// the same pointer), otherwise it will add the new entry and return that.
|
||||
//
|
||||
existing, present := p.cache.Get(ctx)
|
||||
if present {
|
||||
return existing
|
||||
}
|
||||
p.cache.Put(ctx, ctx)
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
|
||||
pc, exists := p.cache.Get(ctx)
|
||||
return pc, exists
|
||||
}
|
||||
|
||||
func (p *PredictionContextCache) length() int {
|
||||
return p.cache.Len()
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user