filters: clean up implementation

Address a few cleanup items in the parser. Compound values, such as
labels.foo==omg_asdf.asdf-qwer, are now handled, and the panic on
unconsumed input has been replaced with a proper parse error.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
Stephen J Day <stephen.day@docker.com>, 2017-06-20 17:50:06 -07:00
commit 3d5ee9e8b8, parent 80656bf8ca
4 changed files with 49 additions and 33 deletions
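
For illustration, a minimal sketch of the two behaviors this commit touches, assuming the package's exported Parse entry point; the second input is a hypothetical example of trailing unconsumed tokens:

package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// Compound values with embedded punctuation now scan as a single
	// value token instead of stopping at the first '.' or '-'.
	f, err := filters.Parse("labels.foo==omg_asdf.asdf-qwer")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("parsed: %v\n", f)

	// A stray operator after a complete selector previously hit the
	// "unconsumed input" panic in the parser loop; it should now
	// surface as a parse error instead.
	if _, err := filters.Parse("name==bar==baz"); err != nil {
		fmt.Println("parse error:", err)
	}
}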


@@ -56,6 +56,12 @@ func TestFilters(t *testing.T) {
Name: "bazo",
Other: "abc",
},
{
Name: "compound",
Labels: map[string]string{
"foo": "omg_asdf.asdf-qwer",
},
},
}
var corpus []interface{}
@@ -103,6 +109,7 @@ func TestFilters(t *testing.T) {
expected: []interface{}{
corpus[0],
corpus[2],
corpus[8],
},
},
{
@@ -112,6 +119,13 @@ func TestFilters(t *testing.T) {
corpus[0],
},
},
{
name: "LabelValuePunctuated",
input: "labels.foo==omg_asdf.asdf-qwer",
expected: []interface{}{
corpus[8],
},
},
{
name: "Name",
input: "name==bar",
@@ -130,6 +144,7 @@ func TestFilters(t *testing.T) {
corpus[5],
corpus[6],
corpus[7],
corpus[8],
},
},
{


@@ -72,7 +72,7 @@ loop:
switch tok {
case ',':
pos, tok, _ := p.scanner.scan()
if tok != tokenSelectorSeparator {
if tok != tokenSeparator {
return nil, p.mkerr(pos, "expected a separator")
}
@@ -85,7 +85,7 @@ loop:
case tokenEOF:
break loop
default:
panic("unconsumed input")
return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
}
}
@@ -99,7 +99,7 @@ func (p *parser) selector() (selector, error) {
}
switch p.scanner.peek() {
case tokenSelectorSeparator, tokenEOF:
case tokenSeparator, tokenEOF:
return selector{
fieldpath: fieldpath,
operator: operatorPresent,
@@ -140,7 +140,7 @@ loop:
switch tok {
case '.':
pos, tok, _ := p.scanner.scan() // consume separator
if tok != tokenFieldSeparator {
if tok != tokenSeparator {
return nil, p.mkerr(pos, "expected a field separator (`.`)")
}


@@ -11,9 +11,8 @@ const (
tokenQuoted
tokenValue
tokenField
tokenFieldSeparator
tokenSeparator
tokenOperator
tokenSelectorSeparator
tokenIllegal
)
@@ -29,12 +28,10 @@ func (t token) String() string {
return "Value"
case tokenField:
return "Field"
case tokenSeparator:
return "Separator"
case tokenOperator:
return "Operator"
case tokenFieldSeparator:
return "FieldSeparator"
case tokenSelectorSeparator:
return "SelectorSeparator"
case tokenIllegal:
return "Illegal"
}
@@ -102,12 +99,10 @@ chomp:
case ch == tokenEOF:
case ch == tokenIllegal:
case isQuoteRune(ch):
s.scanString(ch)
s.scanQuoted(ch)
return pos, tokenQuoted, s.input[pos:s.ppos]
case ch == ',':
return pos, tokenSelectorSeparator, s.input[pos:s.ppos]
case ch == '.':
return pos, tokenFieldSeparator, s.input[pos:s.ppos]
case isSeparatorRune(ch):
return pos, tokenSeparator, s.input[pos:s.ppos]
case isOperatorRune(ch):
s.scanOperator()
s.value = true
@@ -119,12 +114,6 @@ chomp:
goto chomp
case s.value:
s.scanValue()
// TODO(stevvooe): We can get rid of the value flag by by having a
// scanUnquoted that accumulates characters. If it is a legal field,
// then we return a field token. The parser can then treat fields as
// values. This will allow the default case here to just scan value or
// field.
s.value = false
return pos, tokenValue, s.input[pos:s.ppos]
case isFieldRune(ch):
@@ -167,7 +156,7 @@ func (s *scanner) scanValue() {
}
}
func (s *scanner) scanString(quote rune) {
func (s *scanner) scanQuoted(quote rune) {
ch := s.next() // read character after quote
for ch != quote {
if ch == '\n' || ch < 0 {
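
Taken together, the scanner changes above fold the ',' and '.' cases into a single tokenSeparator driven by a predicate. A sketch of what that predicate likely looks like, since its body is not part of this diff:

// isSeparatorRune reports whether ch is one of the two separator
// runes this change collapses into the single tokenSeparator token:
// ',' between selectors and '.' between field path components.
func isSeparatorRune(ch rune) bool {
	switch ch {
	case ',', '.':
		return true
	}
	return false
}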


@@ -38,7 +38,7 @@ func TestScanner(t *testing.T) {
{pos: 0, token: tokenField, text: "name"},
{pos: 4, token: tokenOperator, text: "=="},
{pos: 6, token: tokenValue, text: "value"},
{pos: 11, token: tokenSelectorSeparator, text: ","},
{pos: 11, token: tokenSeparator, text: ","},
{pos: 12, token: tokenField, text: "foo"},
{pos: 15, token: tokenOperator, text: "!="},
{pos: 17, token: tokenValue, text: "bar"},
@@ -52,15 +52,15 @@ func TestScanner(t *testing.T) {
{pos: 0, token: tokenField, text: "name"},
{pos: 4, token: tokenOperator, text: "=="},
{pos: 6, token: tokenValue, text: "value"},
{pos: 11, token: tokenSelectorSeparator, text: ","},
{pos: 11, token: tokenSeparator, text: ","},
{pos: 12, token: tokenField, text: "labels"},
{pos: 18, token: tokenFieldSeparator, text: "."},
{pos: 18, token: tokenSeparator, text: "."},
{pos: 19, token: tokenField, text: "foo"},
{pos: 22, token: tokenOperator, text: "="},
{pos: 23, token: tokenValue, text: "value"},
{pos: 28, token: tokenSelectorSeparator, text: ","},
{pos: 28, token: tokenSeparator, text: ","},
{pos: 29, token: tokenField, text: "other"},
{pos: 34, token: tokenFieldSeparator, text: "."},
{pos: 34, token: tokenSeparator, text: "."},
{pos: 35, token: tokenField, text: "bar"},
{pos: 38, token: tokenOperator, text: "~="},
{pos: 40, token: tokenValue, text: "match"},
@@ -74,7 +74,7 @@ func TestScanner(t *testing.T) {
{pos: 0, token: tokenField, text: "name"},
{pos: 4, token: tokenOperator, text: "~="},
{pos: 6, token: tokenValue, text: "[abc]+"},
{pos: 12, token: tokenSelectorSeparator, text: ","},
{pos: 12, token: tokenSeparator, text: ","},
{pos: 13, token: tokenField, text: "foo"},
{pos: 16, token: tokenOperator, text: "="},
{pos: 17, token: tokenValue, text: "test"},
@@ -88,7 +88,7 @@ func TestScanner(t *testing.T) {
{pos: 0, token: tokenField, text: "name"},
{pos: 4, token: tokenOperator, text: "~="},
{pos: 6, token: tokenValue, text: "[abc]\\+"},
{pos: 13, token: tokenSelectorSeparator, text: ","},
{pos: 13, token: tokenSeparator, text: ","},
{pos: 14, token: tokenField, text: "foo"},
{pos: 17, token: tokenOperator, text: "="},
{pos: 18, token: tokenValue, text: "test"},
@@ -102,9 +102,9 @@ func TestScanner(t *testing.T) {
{pos: 0, token: tokenField, text: "name"},
{pos: 4, token: tokenOperator, text: "~="},
{pos: 6, token: tokenValue, text: "牛"},
{pos: 9, token: tokenSelectorSeparator, text: ","},
{pos: 9, token: tokenSeparator, text: ","},
{pos: 10, token: tokenField, text: "labels"},
{pos: 16, token: tokenFieldSeparator, text: "."},
{pos: 16, token: tokenSeparator, text: "."},
{pos: 17, token: tokenField, text: "moo"},
{pos: 20, token: tokenOperator, text: "="},
{pos: 21, token: tokenValue, text: "true"},
@@ -141,6 +141,18 @@ func TestScanner(t *testing.T) {
{pos: 13, token: tokenEOF},
},
},
{
name: "ValuesPunctauted",
input: "compound.labels==punctuated_value.foo-bar",
expected: []tokenResult{
{pos: 0, token: tokenField, text: "compound"},
{pos: 8, token: tokenSeparator, text: "."},
{pos: 9, token: tokenField, text: "labels"},
{pos: 15, token: tokenOperator, text: "=="},
{pos: 17, token: tokenValue, text: "punctuated_value.foo-bar"},
{pos: 41, token: tokenEOF},
},
},
{
name: "PartialInput",
input: "interrupted=",
@@ -166,7 +178,7 @@ func TestScanner(t *testing.T) {
input: `"leading quote".postquote==value`,
expected: []tokenResult{
{pos: 0, token: tokenQuoted, text: "\"leading quote\""},
{pos: 15, token: tokenFieldSeparator, text: "."},
{pos: 15, token: tokenSeparator, text: "."},
{pos: 16, token: tokenField, text: "postquote"},
{pos: 25, token: tokenOperator, text: "=="},
{pos: 27, token: tokenValue, text: "value"},
@@ -179,7 +191,7 @@ func TestScanner(t *testing.T) {
expected: []tokenResult{
{pos: 0, token: tokenField, text: "input"},
{pos: 5, token: tokenOperator, text: "=="},
{pos: 7, token: tokenSelectorSeparator, text: ","},
{pos: 7, token: tokenSeparator, text: ","},
{pos: 8, token: tokenValue, text: "id?=ff"},
{pos: 14, token: tokenEOF},
},
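
End to end, the new compound-value case can be exercised roughly as below, assuming the package's Filter.Match method and an Adaptor interface with a Field(fieldpath []string) (string, bool) method; the labelAdaptor type is hypothetical:

package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

// labelAdaptor is a hypothetical adaptor exposing only a labels map,
// mirroring the compound-label corpus entry added in this commit.
type labelAdaptor map[string]string

func (l labelAdaptor) Field(fieldpath []string) (string, bool) {
	if len(fieldpath) == 2 && fieldpath[0] == "labels" {
		v, ok := l[fieldpath[1]]
		return v, ok
	}
	return "", false
}

func main() {
	f, err := filters.Parse("labels.foo==omg_asdf.asdf-qwer")
	if err != nil {
		panic(err)
	}
	// Expected to print "matches: true" for the compound label value.
	fmt.Println("matches:", f.Match(labelAdaptor{"foo": "omg_asdf.asdf-qwer"}))
}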