Bump cel-go to v0.12.0
vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go | 21 (generated, vendored)

@@ -4,6 +4,8 @@
package antlr
import "sync"
var ATNInvalidAltNumber int
type ATN struct {

@@ -37,6 +39,10 @@ type ATN struct {
ruleToTokenType []int
states []ATNState
mu sync.Mutex
stateMu sync.RWMutex
edgeMu sync.RWMutex
}
func NewATN(grammarType int, maxTokenType int) *ATN {

@@ -59,14 +65,15 @@ func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
// in s and staying in same rule. Token.EPSILON is in set if we reach end of
// rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
if s.GetNextTokenWithinRule() != nil {
return s.GetNextTokenWithinRule()
a.mu.Lock()
defer a.mu.Unlock()
iset := s.GetNextTokenWithinRule()
if iset == nil {
iset = a.NextTokensInContext(s, nil)
iset.readOnly = true
s.SetNextTokenWithinRule(iset)
}
s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil))
s.GetNextTokenWithinRule().readOnly = true
return s.GetNextTokenWithinRule()
return iset
}
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go | 8 (generated, vendored)

@@ -104,7 +104,7 @@ func (b *BaseATNConfigSet) Alts() *BitSet {
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
return &BaseATNConfigSet{
cachedHash: -1,
configLookup: NewArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
fullCtx: fullCtx,
}
}

@@ -155,7 +155,7 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
}
func (b *BaseATNConfigSet) GetStates() Set {
states := NewArray2DHashSet(nil, nil)
states := newArray2DHashSet(nil, nil)
for i := 0; i < len(b.configs); i++ {
states.Add(b.configs[i].GetState())

@@ -283,7 +283,7 @@ func (b *BaseATNConfigSet) Clear() {
b.configs = make([]ATNConfig, 0)
b.cachedHash = -1
b.configLookup = NewArray2DHashSet(nil, equalATNConfigs)
b.configLookup = newArray2DHashSet(nil, equalATNConfigs)
}
func (b *BaseATNConfigSet) FullContext() bool {

@@ -365,7 +365,7 @@ type OrderedATNConfigSet struct {
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
b := NewBaseATNConfigSet(false)
b.configLookup = NewArray2DHashSet(nil, nil)
b.configLookup = newArray2DHashSet(nil, nil)
return &OrderedATNConfigSet{BaseATNConfigSet: b}
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go | 50 (generated, vendored)

@@ -4,7 +4,9 @@
package antlr
var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}
import "errors"
var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false}
type ATNDeserializationOptions struct {
readOnly bool

@@ -12,14 +14,48 @@ type ATNDeserializationOptions struct {
generateRuleBypassTransitions bool
}
func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions {
o := new(ATNDeserializationOptions)
func (opts *ATNDeserializationOptions) ReadOnly() bool {
return opts.readOnly
}
if CopyFrom != nil {
o.readOnly = CopyFrom.readOnly
o.verifyATN = CopyFrom.verifyATN
o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions
func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
if opts.readOnly {
panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.readOnly = readOnly
}
func (opts *ATNDeserializationOptions) VerifyATN() bool {
return opts.verifyATN
}
func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
if opts.readOnly {
panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.verifyATN = verifyATN
}
func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
return opts.generateRuleBypassTransitions
}
func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
if opts.readOnly {
panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
}
opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}
func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
o := new(ATNDeserializationOptions)
if other != nil {
*o = *other
o.readOnly = false
}
return o
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go | 299 (generated, vendored)

@@ -5,50 +5,34 @@
package antlr
import (
"encoding/hex"
"fmt"
"strconv"
"strings"
"unicode/utf16"
)
// This is the earliest supported serialized UUID.
// stick to serialized version for now, we don't need a UUID instance
var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"
var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089"
const serializedVersion = 4
// This list contains all of the currently supported UUIDs, ordered by when
// the feature first appeared in this branch.
var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP}
var SerializedVersion = 3
// This is the current serialized UUID.
var SerializedUUID = AddedUnicodeSMP
type LoopEndStateIntPair struct {
type loopEndStateIntPair struct {
item0 *LoopEndState
item1 int
}
type BlockStartStateIntPair struct {
type blockStartStateIntPair struct {
item0 BlockStartState
item1 int
}
type ATNDeserializer struct {
deserializationOptions *ATNDeserializationOptions
data []rune
pos int
uuid string
options *ATNDeserializationOptions
data []int32
pos int
}
func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
if options == nil {
options = ATNDeserializationOptionsdefaultOptions
options = &defaultATNDeserializationOptions
}
return &ATNDeserializer{deserializationOptions: options}
return &ATNDeserializer{options: options}
}
func stringInSlice(a string, list []string) int {

@@ -61,30 +45,10 @@ func stringInSlice(a string, list []string) int {
return -1
}
// isFeatureSupported determines if a particular serialized representation of an
// ATN supports a particular feature, identified by the UUID used for
// serializing the ATN at the time the feature was first introduced. Feature is
// the UUID marking the first time the feature was supported in the serialized
// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently
// being deserialized. It returns true if actualUuid represents a serialized ATN
// at or after the feature identified by feature was introduced, and otherwise
// false.
func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool {
idx1 := stringInSlice(feature, SupportedUUIDs)
if idx1 < 0 {
return false
}
idx2 := stringInSlice(actualUUID, SupportedUUIDs)
return idx2 >= idx1
}
func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
a.reset(utf16.Decode(data))
func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
a.data = data
a.pos = 0
a.checkVersion()
a.checkUUID()
atn := a.readATN()

@@ -92,15 +56,7 @@ func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
a.readRules(atn)
a.readModes(atn)
sets := make([]*IntervalSet, 0)
// First, deserialize sets with 16-bit arguments <= U+FFFF.
sets = a.readSets(atn, sets, a.readInt)
// Next, if the ATN was serialized with the Unicode SMP feature,
// deserialize sets with 32-bit arguments <= U+10FFFF.
if (a.isFeatureSupported(AddedUnicodeSMP, a.uuid)) {
sets = a.readSets(atn, sets, a.readInt32)
}
sets := a.readSets(atn, nil)
a.readEdges(atn, sets)
a.readDecisions(atn)

@@ -108,7 +64,7 @@ func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
a.markPrecedenceDecisions(atn)
a.verifyATN(atn)
if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser {
if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
a.generateRuleBypassTransitions(atn)
// Re-verify after modification
a.verifyATN(atn)

@@ -118,42 +74,14 @@ func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
}
func (a *ATNDeserializer) reset(data []rune) {
temp := make([]rune, len(data))
for i, c := range data {
// Don't adjust the first value since that's the version number
if i == 0 {
temp[i] = c
} else if c > 1 {
temp[i] = c - 2
} else {
temp[i] = c + 65533
}
}
a.data = temp
a.pos = 0
}
func (a *ATNDeserializer) checkVersion() {
version := a.readInt()
if version != SerializedVersion {
panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").")
if version != serializedVersion {
panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
}
}
func (a *ATNDeserializer) checkUUID() {
uuid := a.readUUID()
if stringInSlice(uuid, SupportedUUIDs) < 0 {
panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).")
}
a.uuid = uuid
}
func (a *ATNDeserializer) readATN() *ATN {
grammarType := a.readInt()
maxTokenType := a.readInt()

@@ -162,37 +90,36 @@ func (a *ATNDeserializer) readATN() *ATN {
}
func (a *ATNDeserializer) readStates(atn *ATN) {
loopBackStateNumbers := make([]LoopEndStateIntPair, 0)
endStateNumbers := make([]BlockStartStateIntPair, 0)
nstates := a.readInt()
// Allocate worst case size.
loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
endStateNumbers := make([]blockStartStateIntPair, 0, nstates)
// Preallocate states slice.
atn.states = make([]ATNState, 0, nstates)
for i := 0; i < nstates; i++ {
stype := a.readInt()
// Ignore bad types of states
if stype == ATNStateInvalidType {
atn.addState(nil)
continue
}
ruleIndex := a.readInt()
if ruleIndex == 0xFFFF {
ruleIndex = -1
}
s := a.stateFactory(stype, ruleIndex)
if stype == ATNStateLoopEnd {
loopBackStateNumber := a.readInt()
loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
} else if s2, ok := s.(BlockStartState); ok {
endStateNumber := a.readInt()
endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber})
endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
}
atn.addState(s)

@@ -200,20 +127,15 @@ func (a *ATNDeserializer) readStates(atn *ATN) {
// Delay the assignment of loop back and end states until we know all the state
// instances have been initialized
for j := 0; j < len(loopBackStateNumbers); j++ {
pair := loopBackStateNumbers[j]
for _, pair := range loopBackStateNumbers {
pair.item0.loopBackState = atn.states[pair.item1]
}
for j := 0; j < len(endStateNumbers); j++ {
pair := endStateNumbers[j]
for _, pair := range endStateNumbers {
pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
}
numNonGreedyStates := a.readInt()
for j := 0; j < numNonGreedyStates; j++ {
stateNumber := a.readInt()

@@ -221,7 +143,6 @@ func (a *ATNDeserializer) readStates(atn *ATN) {
}
numPrecedenceStates := a.readInt()
for j := 0; j < numPrecedenceStates; j++ {
stateNumber := a.readInt()

@@ -233,12 +154,12 @@ func (a *ATNDeserializer) readRules(atn *ATN) {
nrules := a.readInt()
if atn.grammarType == ATNTypeLexer {
atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0)
atn.ruleToTokenType = make([]int, nrules)
}
atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0)
atn.ruleToStartState = make([]*RuleStartState, nrules)
for i := 0; i < nrules; i++ {
for i := range atn.ruleToStartState {
s := a.readInt()
startState := atn.states[s].(*RuleStartState)

@@ -247,19 +168,13 @@ func (a *ATNDeserializer) readRules(atn *ATN) {
if atn.grammarType == ATNTypeLexer {
tokenType := a.readInt()
if tokenType == 0xFFFF {
tokenType = TokenEOF
}
atn.ruleToTokenType[i] = tokenType
}
}
atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0)
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
atn.ruleToStopState = make([]*RuleStopState, nrules)
for _, state := range atn.states {
if s2, ok := state.(*RuleStopState); ok {
atn.ruleToStopState[s2.ruleIndex] = s2
atn.ruleToStartState[s2.ruleIndex].stopState = s2

@@ -269,17 +184,25 @@ func (a *ATNDeserializer) readRules(atn *ATN) {
func (a *ATNDeserializer) readModes(atn *ATN) {
nmodes := a.readInt()
atn.modeToStartState = make([]*TokensStartState, nmodes)
for i := 0; i < nmodes; i++ {
for i := range atn.modeToStartState {
s := a.readInt()
atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState))
atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
}
}
func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet {
func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
m := a.readInt()
// Preallocate the needed capacity.
if cap(sets)-len(sets) < m {
isets := make([]*IntervalSet, len(sets), len(sets)+m)
copy(isets, sets)
sets = isets
}
for i := 0; i < m; i++ {
iset := NewIntervalSet()

@@ -293,8 +216,8 @@ func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode fu
}
for j := 0; j < n; j++ {
i1 := readUnicode()
i2 := readUnicode()
i1 := a.readInt()
i2 := a.readInt()
iset.addRange(i1, i2)
}

@@ -322,11 +245,9 @@ func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
}
// Edges for rule stop states can be derived, so they are not serialized
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
for j := 0; j < len(state.GetTransitions()); j++ {
var t, ok = state.GetTransitions()[j].(*RuleTransition)
for _, state := range atn.states {
for _, t := range state.GetTransitions() {
var rt, ok = t.(*RuleTransition)
if !ok {
continue

@@ -334,48 +255,42 @@ func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
outermostPrecedenceReturn := -1
if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule {
if t.precedence == 0 {
outermostPrecedenceReturn = t.getTarget().GetRuleIndex()
if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
if rt.precedence == 0 {
outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
}
}
trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn)
trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)
atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1)
atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
}
}
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if s2, ok := state.(*BaseBlockStartState); ok {
for _, state := range atn.states {
if s2, ok := state.(BlockStartState); ok {
// We need to know the end state to set its start state
if s2.endState == nil {
if s2.getEndState() == nil {
panic("IllegalState")
}
// Block end states can only be associated to a single block start state
if s2.endState.startState != nil {
if s2.getEndState().startState != nil {
panic("IllegalState")
}
s2.endState.startState = state
s2.getEndState().startState = state
}
if s2, ok := state.(*PlusLoopbackState); ok {
for j := 0; j < len(s2.GetTransitions()); j++ {
target := s2.GetTransitions()[j].getTarget()
if t2, ok := target.(*PlusBlockStartState); ok {
for _, t := range s2.GetTransitions() {
if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
t2.loopBackState = state
}
}
} else if s2, ok := state.(*StarLoopbackState); ok {
for j := 0; j < len(s2.GetTransitions()); j++ {
target := s2.GetTransitions()[j].getTarget()
if t2, ok := target.(*StarLoopEntryState); ok {
for _, t := range s2.GetTransitions() {
if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
t2.loopBackState = state
}
}

@@ -399,25 +314,13 @@ func (a *ATNDeserializer) readLexerActions(atn *ATN) {
if atn.grammarType == ATNTypeLexer {
count := a.readInt()
atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil)
atn.lexerActions = make([]LexerAction, count)
for i := 0; i < count; i++ {
for i := range atn.lexerActions {
actionType := a.readInt()
data1 := a.readInt()
if data1 == 0xFFFF {
data1 = -1
}
data2 := a.readInt()
if data2 == 0xFFFF {
data2 = -1
}
lexerAction := a.lexerActionFactory(actionType, data1, data2)
atn.lexerActions[i] = lexerAction
atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
}
}
}

@@ -565,14 +468,12 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
}
func (a *ATNDeserializer) verifyATN(atn *ATN) {
if !a.deserializationOptions.verifyATN {
if !a.options.VerifyATN() {
return
}
// Verify assumptions
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
for _, state := range atn.states {
if state == nil {
continue
}

@@ -587,18 +488,18 @@ func (a *ATNDeserializer) verifyATN(atn *ATN) {
a.checkCondition(s2.loopBackState != nil, "")
a.checkCondition(len(s2.GetTransitions()) == 2, "")
switch s2 := state.(type) {
switch s2.transitions[0].getTarget().(type) {
case *StarBlockStartState:
var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState)
_, ok := s2.transitions[1].getTarget().(*LoopEndState)
a.checkCondition(ok2, "")
a.checkCondition(ok, "")
a.checkCondition(!s2.nonGreedy, "")
case *LoopEndState:
var s3, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState)
var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState)
a.checkCondition(ok2, "")
a.checkCondition(s3.nonGreedy, "")
a.checkCondition(ok, "")
a.checkCondition(s2.nonGreedy, "")
default:
panic("IllegalState")

@@ -607,9 +508,9 @@ func (a *ATNDeserializer) verifyATN(atn *ATN) {
case *StarLoopbackState:
a.checkCondition(len(state.GetTransitions()) == 1, "")
var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
a.checkCondition(ok2, "")
a.checkCondition(ok, "")
case *LoopEndState:
a.checkCondition(s2.loopBackState != nil, "")

@@ -617,8 +518,8 @@ func (a *ATNDeserializer) verifyATN(atn *ATN) {
case *RuleStartState:
a.checkCondition(s2.stopState != nil, "")
case *BaseBlockStartState:
a.checkCondition(s2.endState != nil, "")
case BlockStartState:
a.checkCondition(s2.getEndState() != nil, "")
case *BlockEndState:
a.checkCondition(s2.startState != nil, "")

@@ -649,53 +550,7 @@ func (a *ATNDeserializer) readInt() int {
a.pos++
return int(v)
}
func (a *ATNDeserializer) readInt32() int {
var low = a.readInt()
var high = a.readInt()
return low | (high << 16)
}
//TODO
//func (a *ATNDeserializer) readLong() int64 {
// panic("Not implemented")
// var low = a.readInt32()
// var high = a.readInt32()
// return (low & 0x00000000FFFFFFFF) | (high << int32)
//}
func createByteToHex() []string {
bth := make([]string, 256)
for i := 0; i < 256; i++ {
bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)}))
}
return bth
}
var byteToHex = createByteToHex()
func (a *ATNDeserializer) readUUID() string {
bb := make([]int, 16)
for i := 7; i >= 0; i-- {
integer := a.readInt()
bb[(2*i)+1] = integer & 0xFF
bb[2*i] = (integer >> 8) & 0xFF
}
return byteToHex[bb[0]] + byteToHex[bb[1]] +
byteToHex[bb[2]] + byteToHex[bb[3]] + "-" +
byteToHex[bb[4]] + byteToHex[bb[5]] + "-" +
byteToHex[bb[6]] + byteToHex[bb[7]] + "-" +
byteToHex[bb[8]] + byteToHex[bb[9]] + "-" +
byteToHex[bb[10]] + byteToHex[bb[11]] +
byteToHex[bb[12]] + byteToHex[bb[13]] +
byteToHex[bb[14]] + byteToHex[bb[15]]
return int(v) // data is 32 bits but int is at least that big
}
func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go | 6 (generated, vendored)

@@ -243,6 +243,8 @@ func NewBasicBlockStartState() *BasicBlockStartState {
return &BasicBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &BasicBlockStartState{}
// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
*BaseATNState

@@ -318,6 +320,8 @@ func NewPlusBlockStartState() *PlusBlockStartState {
return &PlusBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &PlusBlockStartState{}
// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
*BaseBlockStartState

@@ -331,6 +335,8 @@ func NewStarBlockStartState() *StarBlockStartState {
return &StarBlockStartState{BaseBlockStartState: b}
}
var _ BlockStartState = &StarBlockStartState{}
type StarLoopbackState struct {
*BaseATNState
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go | 29 (generated, vendored)

@@ -6,7 +6,6 @@ package antlr
import (
"sort"
"sync"
)
type DFA struct {

@@ -18,23 +17,27 @@ type DFA struct {
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there.
states map[int]*DFAState
statesMu sync.RWMutex
s0 *DFAState
s0Mu sync.RWMutex
// precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
// True if the DFA is for a precedence decision and false otherwise.
precedenceDfa bool
precedenceDfaMu sync.RWMutex
}
func NewDFA(atnStartState DecisionState, decision int) *DFA {
return &DFA{
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
states: make(map[int]*DFAState),
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
dfa.s0.isAcceptState = false
dfa.s0.requiresFullContext = false
}
return dfa
}
// getPrecedenceStartState gets the start state for the current precedence and

@@ -79,8 +82,6 @@ func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
}
func (d *DFA) getPrecedenceDfa() bool {
d.precedenceDfaMu.RLock()
defer d.precedenceDfaMu.RUnlock()
return d.precedenceDfa
}

@@ -104,46 +105,32 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
d.setS0(nil)
}
d.precedenceDfaMu.Lock()
defer d.precedenceDfaMu.Unlock()
d.precedenceDfa = precedenceDfa
}
}
func (d *DFA) getS0() *DFAState {
d.s0Mu.RLock()
defer d.s0Mu.RUnlock()
return d.s0
}
func (d *DFA) setS0(s *DFAState) {
d.s0Mu.Lock()
defer d.s0Mu.Unlock()
d.s0 = s
}
func (d *DFA) getState(hash int) (*DFAState, bool) {
d.statesMu.RLock()
defer d.statesMu.RUnlock()
s, ok := d.states[hash]
return s, ok
}
func (d *DFA) setStates(states map[int]*DFAState) {
d.statesMu.Lock()
defer d.statesMu.Unlock()
d.states = states
}
func (d *DFA) setState(hash int, state *DFAState) {
d.statesMu.Lock()
defer d.statesMu.Unlock()
d.states[hash] = state
}
func (d *DFA) numStates() int {
d.statesMu.RLock()
defer d.statesMu.RUnlock()
return len(d.states)
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go | 16 (generated, vendored)

@@ -6,7 +6,6 @@ package antlr
import (
"fmt"
"sync"
)
// PredPrediction maps a predicate to a predicted alternative.

@@ -50,8 +49,7 @@ type DFAState struct {
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
// Token.EOF maps to the first element.
edges []*DFAState
edgesMu sync.RWMutex
edges []*DFAState
isAcceptState bool

@@ -93,7 +91,7 @@ func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
func (d *DFAState) GetAltSet() Set {
alts := NewArray2DHashSet(nil, nil)
alts := newArray2DHashSet(nil, nil)
if d.configs != nil {
for _, c := range d.configs.GetItems() {

@@ -109,32 +107,22 @@ func (d *DFAState) GetAltSet() Set {
}
func (d *DFAState) getEdges() []*DFAState {
d.edgesMu.RLock()
defer d.edgesMu.RUnlock()
return d.edges
}
func (d *DFAState) numEdges() int {
d.edgesMu.RLock()
defer d.edgesMu.RUnlock()
return len(d.edges)
}
func (d *DFAState) getIthEdge(i int) *DFAState {
d.edgesMu.RLock()
defer d.edgesMu.RUnlock()
return d.edges[i]
}
func (d *DFAState) setEdges(newEdges []*DFAState) {
d.edgesMu.Lock()
defer d.edgesMu.Unlock()
d.edges = newEdges
}
func (d *DFAState) setIthEdge(i int, edge *DFAState) {
d.edgesMu.Lock()
defer d.edgesMu.Unlock()
d.edges[i] = edge
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go | 20 (generated, vendored)

@@ -16,7 +16,7 @@ type ErrorStrategy interface {
RecoverInline(Parser) Token
Recover(Parser, RecognitionException)
Sync(Parser)
inErrorRecoveryMode(Parser) bool
InErrorRecoveryMode(Parser) bool
ReportError(Parser, RecognitionException)
ReportMatch(Parser)
}

@@ -40,7 +40,7 @@ func NewDefaultErrorStrategy() *DefaultErrorStrategy {
// error". This is used to suppress Reporting multiple error messages while
// attempting to recover from a detected syntax error.
//
// @see //inErrorRecoveryMode
// @see //InErrorRecoveryMode
//
d.errorRecoveryMode = false

@@ -71,7 +71,7 @@ func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
d.errorRecoveryMode = true
}
func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool {
func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
return d.errorRecoveryMode
}

@@ -118,7 +118,7 @@ func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
// yet successfully, don't Report any errors.
if d.inErrorRecoveryMode(recognizer) {
if d.InErrorRecoveryMode(recognizer) {
return // don't Report spurious errors
}
d.beginErrorCondition(recognizer)

@@ -209,7 +209,7 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException
//
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
// If already recovering, don't try to Sync
if d.inErrorRecoveryMode(recognizer) {
if d.InErrorRecoveryMode(recognizer) {
return
}

@@ -312,7 +312,7 @@ func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *Faile
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
if d.inErrorRecoveryMode(recognizer) {
if d.InErrorRecoveryMode(recognizer) {
return
}
d.beginErrorCondition(recognizer)

@@ -341,7 +341,7 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
if d.inErrorRecoveryMode(recognizer) {
if d.InErrorRecoveryMode(recognizer) {
return
}
d.beginErrorCondition(recognizer)

@@ -738,7 +738,11 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context := recognizer.GetParserRuleContext()
for context != nil {
context.SetException(e)
context = context.GetParent().(ParserRuleContext)
if parent, ok := context.GetParent().(ParserRuleContext); ok {
context = parent
} else {
context = nil
}
}
panic(NewParseCancellationException()) // TODO we don't emit e properly
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go | 48 (generated, vendored)

@@ -91,11 +91,16 @@ func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
dfa := l.decisionToDFA[mode]
if dfa.getS0() == nil {
var s0 *DFAState
l.atn.stateMu.RLock()
s0 = dfa.getS0()
l.atn.stateMu.RUnlock()
if s0 == nil {
return l.MatchATN(input)
}
return l.execATN(input, dfa.getS0())
return l.execATN(input, s0)
}
func (l *LexerATNSimulator) reset() {

@@ -117,11 +122,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
suppressEdge := s0Closure.hasSemanticContext
s0Closure.hasSemanticContext = false
next := l.addDFAState(s0Closure)
if !suppressEdge {
l.decisionToDFA[l.mode].setS0(next)
}
next := l.addDFAState(s0Closure, suppressEdge)
predict := l.execATN(input, next)

@@ -203,10 +204,15 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
// {@code t}, or {@code nil} if the target state for l edge is not
// already cached
func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
if s.getEdges() == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
return nil
}
l.atn.edgeMu.RLock()
defer l.atn.edgeMu.RUnlock()
if s.getEdges() == nil {
return nil
}
target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
if LexerATNSimulatorDebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))

@@ -537,7 +543,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
suppressEdge := cfgs.HasSemanticContext()
cfgs.SetHasSemanticContext(false)
to = l.addDFAState(cfgs)
to = l.addDFAState(cfgs, true)
if suppressEdge {
return to

@@ -551,6 +557,8 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
if LexerATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
}
l.atn.edgeMu.Lock()
defer l.atn.edgeMu.Unlock()
if from.getEdges() == nil {
// make room for tokens 1..n and -1 masquerading as index 0
from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))

@@ -564,7 +572,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
proposed := NewDFAState(-1, configs)
var firstConfigWithRuleStopState ATNConfig

@@ -585,16 +593,22 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
}
hash := proposed.hash()
dfa := l.decisionToDFA[l.mode]
l.atn.stateMu.Lock()
defer l.atn.stateMu.Unlock()
existing, ok := dfa.getState(hash)
if ok {
return existing
proposed = existing
} else {
proposed.stateNumber = dfa.numStates()
configs.SetReadOnly(true)
proposed.configs = configs
dfa.setState(hash, proposed)
}
newState := proposed
newState.stateNumber = dfa.numStates()
configs.SetReadOnly(true)
newState.configs = configs
dfa.setState(hash, newState)
return newState
if !suppressEdge {
dfa.setS0(proposed)
}
return proposed
}
func (l *LexerATNSimulator) getDFA(mode int) *DFA {
vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go | 4 (generated, vendored)

@@ -38,7 +38,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
look := make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
look[alt] = NewIntervalSet()
lookBusy := NewArray2DHashSet(nil, nil)
lookBusy := newArray2DHashSet(nil, nil)
seeThruPreds := false // fail to get lookahead upon pred
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
// Wipe out lookahead for la alternative if we found nothing

@@ -75,7 +75,7 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
if ctx != nil {
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
}
la.look1(s, stopState, lookContext, r, NewArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
return r
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go | 2 (generated, vendored)

@@ -425,7 +425,7 @@ func (p *BaseParser) Consume() Token {
}
hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
if p.BuildParseTrees || hasListener {
if p.errHandler.inErrorRecoveryMode(p) {
if p.errHandler.InErrorRecoveryMode(p) {
node := p.ctx.AddErrorNode(o)
if p.parseListeners != nil {
for _, l := range p.parseListeners {
vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go | 38 (generated, vendored)

@@ -96,14 +96,18 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
// Now we are certain to have a specific decision's DFA
// But, do we still need an initial state?
var s0 *DFAState
p.atn.stateMu.RLock()
if dfa.getPrecedenceDfa() {
p.atn.edgeMu.RLock()
// the start state for a precedence DFA depends on the current
// parser precedence, and is provided by a DFA method.
s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
p.atn.edgeMu.RUnlock()
} else {
// the start state for a "regular" DFA is just s0
s0 = dfa.getS0()
}
p.atn.stateMu.RUnlock()
if s0 == nil {
if outerContext == nil {

@@ -114,21 +118,10 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
" exec LA(1)==" + p.getLookaheadName(input) +
", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
}
// If p is not a precedence DFA, we check the ATN start state
// to determine if p ATN start state is the decision for the
// closure block that determines whether a precedence rule
// should continue or complete.
t2 := dfa.atnStartState
t, ok := t2.(*StarLoopEntryState)
if !dfa.getPrecedenceDfa() && ok {
if t.precedenceRuleDecision {
dfa.setPrecedenceDfa(true)
}
}
fullCtx := false
s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx)
p.atn.stateMu.Lock()
if dfa.getPrecedenceDfa() {
// If p is a precedence DFA, we use applyPrecedenceFilter
// to convert the computed start state to a precedence start

@@ -139,12 +132,16 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
dfa.s0.configs = s0Closure
s0Closure = p.applyPrecedenceFilter(s0Closure)
s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
p.atn.edgeMu.Lock()
dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
p.atn.edgeMu.Unlock()
} else {
s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
dfa.setS0(s0)
}
p.atn.stateMu.Unlock()
}
alt := p.execATN(dfa, s0, input, index, outerContext)
if ParserATNSimulatorDebug {
fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))

@@ -295,11 +292,16 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
// already cached
func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
edges := previousD.getEdges()
if edges == nil || t+1 < 0 || t+1 >= len(edges) {
if t+1 < 0 {
return nil
}
p.atn.edgeMu.RLock()
defer p.atn.edgeMu.RUnlock()
edges := previousD.getEdges()
if edges == nil || t+1 >= len(edges) {
return nil
}
return previousD.getIthEdge(t + 1)
}

@@ -568,7 +570,7 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
//
if reach == nil {
reach = NewBaseATNConfigSet(fullCtx)
closureBusy := NewArray2DHashSet(nil, nil)
closureBusy := newArray2DHashSet(nil, nil)
treatEOFAsEpsilon := t == TokenEOF
amount := len(intermediate.configs)
for k := 0; k < amount; k++ {

@@ -663,7 +665,7 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full
for i := 0; i < len(a.GetTransitions()); i++ {
target := a.GetTransitions()[i].getTarget()
c := NewBaseATNConfig6(target, i+1, initialContext)
closureBusy := NewArray2DHashSet(nil, nil)
closureBusy := newArray2DHashSet(nil, nil)
p.closure(c, configs, closureBusy, true, fullCtx, false)
}
return configs

@@ -1446,14 +1448,18 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
if to == nil {
return nil
}
p.atn.stateMu.Lock()
to = p.addDFAState(dfa, to) // used existing if possible not incoming
p.atn.stateMu.Unlock()
if from == nil || t < -1 || t > p.atn.maxTokenType {
return to
}
p.atn.edgeMu.Lock()
if from.getEdges() == nil {
from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
}
from.setIthEdge(t+1, to) // connect
p.atn.edgeMu.Unlock()
if ParserATNSimulatorDebug {
var names []string
vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go | 2 (generated, vendored)

@@ -49,7 +49,7 @@ var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)
func (b *BaseRecognizer) checkVersion(toolVersion string) {
runtimeVersion := "4.9.3"
runtimeVersion := "4.10.1"
if runtimeVersion != toolVersion {
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
}
vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go | 4 (generated, vendored)

@@ -193,7 +193,7 @@ type AND struct {
func NewAND(a, b SemanticContext) *AND {
operands := NewArray2DHashSet(nil, nil)
operands := newArray2DHashSet(nil, nil)
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
operands.Add(o)

@@ -345,7 +345,7 @@ type OR struct {
func NewOR(a, b SemanticContext) *OR {
operands := NewArray2DHashSet(nil, nil)
operands := newArray2DHashSet(nil, nil)
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
operands.Add(o)
vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go | 2 (generated, vendored)

@@ -64,7 +64,7 @@ type BaseParseTreeVisitor struct{}
var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil }
func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go | 134 (generated, vendored)

@@ -8,7 +8,7 @@ import (
"bytes"
"errors"
"fmt"
"sort"
"math/bits"
"strconv"
"strings"
)

@@ -71,59 +71,92 @@ type hasher interface {
hash() int
}
const bitsPerWord = 64
func indexForBit(bit int) int {
return bit / bitsPerWord
}
func wordForBit(data []uint64, bit int) uint64 {
idx := indexForBit(bit)
if idx >= len(data) {
return 0
}
return data[idx]
}
func maskForBit(bit int) uint64 {
return uint64(1) << (bit % bitsPerWord)
}
func wordsNeeded(bit int) int {
return indexForBit(bit) + 1
}
type BitSet struct {
data map[int]bool
data []uint64
}
func NewBitSet() *BitSet {
b := new(BitSet)
b.data = make(map[int]bool)
return b
return &BitSet{}
}
func (b *BitSet) add(value int) {
b.data[value] = true
idx := indexForBit(value)
if idx >= len(b.data) {
size := wordsNeeded(value)
data := make([]uint64, size)
copy(data, b.data)
b.data = data
}
b.data[idx] |= maskForBit(value)
}
func (b *BitSet) clear(index int) {
delete(b.data, index)
idx := indexForBit(index)
if idx >= len(b.data) {
return
}
b.data[idx] &= ^maskForBit(index)
}
func (b *BitSet) or(set *BitSet) {
for k := range set.data {
b.add(k)
// Get min size necessary to represent the bits in both sets.
bLen := b.minLen()
setLen := set.minLen()
maxLen := intMax(bLen, setLen)
if maxLen > len(b.data) {
// Increase the size of len(b.data) to repesent the bits in both sets.
data := make([]uint64, maxLen)
copy(data, b.data)
b.data = data
}
// len(b.data) is at least setLen.
for i := 0; i < setLen; i++ {
b.data[i] |= set.data[i]
}
}
func (b *BitSet) remove(value int) {
delete(b.data, value)
b.clear(value)
}
func (b *BitSet) contains(value int) bool {
return b.data[value]
}
func (b *BitSet) values() []int {
ks := make([]int, len(b.data))
i := 0
for k := range b.data {
ks[i] = k
i++
idx := indexForBit(value)
if idx >= len(b.data) {
return false
}
sort.Ints(ks)
return ks
return (b.data[idx] & maskForBit(value)) != 0
}
func (b *BitSet) minValue() int {
min := 2147483647
for k := range b.data {
if k < min {
min = k
for i, v := range b.data {
if v == 0 {
continue
}
return i*bitsPerWord + bits.TrailingZeros64(v)
}
return min
return 2147483647
}
func (b *BitSet) equals(other interface{}) bool {

@@ -132,12 +165,22 @@ func (b *BitSet) equals(other interface{}) bool {
return false
}
if len(b.data) != len(otherBitSet.data) {
if b == otherBitSet {
return true
}
// We only compare set bits, so we cannot rely on the two slices having the same size. Its
// possible for two BitSets to have different slice lengths but the same set bits. So we only
// compare the relavent words and ignore the trailing zeros.
bLen := b.minLen()
otherLen := otherBitSet.minLen()
if bLen != otherLen {
return false
}
for k, v := range b.data {
if otherBitSet.data[k] != v {
for i := 0; i < bLen; i++ {
if b.data[i] != otherBitSet.data[i] {
return false
}
}

@@ -145,18 +188,35 @@ func (b *BitSet) equals(other interface{}) bool {
return true
}
func (b *BitSet) minLen() int {
for i := len(b.data); i > 0; i-- {
if b.data[i-1] != 0 {
return i
}
}
return 0
}
func (b *BitSet) length() int {
return len(b.data)
cnt := 0
for _, val := range b.data {
cnt += bits.OnesCount64(val)
}
return cnt
}
func (b *BitSet) String() string {
vals := b.values()
valsS := make([]string, len(vals))
vals := make([]string, 0, b.length())
for i, val := range vals {
valsS[i] = strconv.Itoa(val)
for i, v := range b.data {
for v != 0 {
n := bits.TrailingZeros64(v)
vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
v &= ^(uint64(1) << n)
}
}
return "{" + strings.Join(valsS, ", ") + "}"
return "{" + strings.Join(vals, ", ") + "}"
}
type AltDict struct {
vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go | 38 (generated, vendored)

@@ -8,7 +8,7 @@ const (
_loadFactor = 0.75
)
var _ Set = (*Array2DHashSet)(nil)
var _ Set = (*array2DHashSet)(nil)
type Set interface {
Add(value interface{}) (added interface{})

@@ -19,7 +19,7 @@ type Set interface {
Each(f func(interface{}) bool)
}
type Array2DHashSet struct {
type array2DHashSet struct {
buckets [][]interface{}
hashcodeFunction func(interface{}) int
equalsFunction func(interface{}, interface{}) bool

@@ -31,7 +31,7 @@ type Array2DHashSet struct {
initialBucketCapacity int
}
func (as *Array2DHashSet) Each(f func(interface{}) bool) {
func (as *array2DHashSet) Each(f func(interface{}) bool) {
if as.Len() < 1 {
return
}

@@ -48,7 +48,7 @@ func (as *Array2DHashSet) Each(f func(interface{}) bool) {
}
}
func (as *Array2DHashSet) Values() []interface{} {
func (as *array2DHashSet) Values() []interface{} {
if as.Len() < 1 {
return nil
}

@@ -61,18 +61,18 @@ func (as *Array2DHashSet) Values() []interface{} {
return values
}
func (as *Array2DHashSet) Contains(value interface{}) bool {
func (as *array2DHashSet) Contains(value interface{}) bool {
return as.Get(value) != nil
}
func (as *Array2DHashSet) Add(value interface{}) interface{} {
func (as *array2DHashSet) Add(value interface{}) interface{} {
if as.n > as.threshold {
as.expand()
}
return as.innerAdd(value)
}
func (as *Array2DHashSet) expand() {
func (as *array2DHashSet) expand() {
old := as.buckets
as.currentPrime += 4

@@ -120,11 +120,11 @@ func (as *Array2DHashSet) expand() {
}
}
func (as *Array2DHashSet) Len() int {
func (as *array2DHashSet) Len() int {
return as.n
}
func (as *Array2DHashSet) Get(o interface{}) interface{} {
func (as *array2DHashSet) Get(o interface{}) interface{} {
if o == nil {
return nil
}

@@ -147,7 +147,7 @@ func (as *Array2DHashSet) Get(o interface{}) interface{} {
return nil
}
func (as *Array2DHashSet) innerAdd(o interface{}) interface{} {
func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
b := as.getBuckets(o)
bucket := as.buckets[b]

@@ -187,25 +187,25 @@ func (as *Array2DHashSet) innerAdd(o interface{}) interface{} {
return o
}
func (as *Array2DHashSet) getBuckets(value interface{}) int {
func (as *array2DHashSet) getBuckets(value interface{}) int {
hash := as.hashcodeFunction(value)
return hash & (len(as.buckets) - 1)
}
func (as *Array2DHashSet) createBuckets(cap int) [][]interface{} {
func (as *array2DHashSet) createBuckets(cap int) [][]interface{} {
return make([][]interface{}, cap)
}
func (as *Array2DHashSet) createBucket(cap int) []interface{} {
func (as *array2DHashSet) createBucket(cap int) []interface{} {
return make([]interface{}, cap)
}
func NewArray2DHashSetWithCap(
func newArray2DHashSetWithCap(
hashcodeFunction func(interface{}) int,
equalsFunction func(interface{}, interface{}) bool,
initCap int,
initBucketCap int,
) *Array2DHashSet {
) *array2DHashSet {
if hashcodeFunction == nil {
hashcodeFunction = standardHashFunction
}

@@ -214,7 +214,7 @@ func NewArray2DHashSetWithCap(
equalsFunction = standardEqualsFunction
}
ret := &Array2DHashSet{
ret := &array2DHashSet{
hashcodeFunction: hashcodeFunction,
equalsFunction: equalsFunction,

@@ -229,9 +229,9 @@ func NewArray2DHashSetWithCap(
return ret
}
func NewArray2DHashSet(
func newArray2DHashSet(
hashcodeFunction func(interface{}) int,
equalsFunction func(interface{}, interface{}) bool,
) *Array2DHashSet {
return NewArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
) *array2DHashSet {
return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
}