Merge pull request #2088 from stevvooe/update-toml
vendor: update BurntSushi/toml for MIT license
		| @@ -32,7 +32,7 @@ golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/gol | ||||
| github.com/opencontainers/image-spec v1.0.1 | ||||
| github.com/containerd/continuity cf279e6ac893682272b4479d4c67fd3abf878b4e | ||||
| golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c | ||||
| github.com/BurntSushi/toml v0.2.0-21-g9906417 | ||||
| github.com/BurntSushi/toml v0.3.0 | ||||
| github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 | ||||
| github.com/Microsoft/go-winio v0.4.5 | ||||
| github.com/Microsoft/hcsshim v0.6.7 | ||||
|   | ||||
							
								
								
									
vendor/github.com/BurntSushi/toml/README.md (generated, vendored): 10 changed lines
							| @@ -6,12 +6,12 @@ packages. This package also supports the `encoding.TextUnmarshaler` and | ||||
| `encoding.TextMarshaler` interfaces so that you can define custom data | ||||
| representations. (There is an example of this below.) | ||||
|  | ||||
| Spec: https://github.com/mojombo/toml | ||||
| Spec: https://github.com/toml-lang/toml | ||||
|  | ||||
| Compatible with TOML version | ||||
| [v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md) | ||||
| [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) | ||||
|  | ||||
| Documentation: http://godoc.org/github.com/BurntSushi/toml | ||||
| Documentation: https://godoc.org/github.com/BurntSushi/toml | ||||
|  | ||||
| Installation: | ||||
|  | ||||
| @@ -26,8 +26,7 @@ go get github.com/BurntSushi/toml/cmd/tomlv | ||||
| tomlv some-toml-file.toml | ||||
| ``` | ||||
|  | ||||
| [](https://travis-ci.org/BurntSushi/toml) | ||||
|  | ||||
| [](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml) | ||||
|  | ||||
| ### Testing | ||||
|  | ||||
| @@ -217,4 +216,3 @@ Note that a case insensitive match will be tried if an exact match can't be | ||||
| found. | ||||
|  | ||||
| A working example of the above can be found in `_examples/example.{go,toml}`. | ||||
|  | ||||
|   | ||||
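The README hunk above notes that the package honors the standard `encoding.TextUnmarshaler` and `encoding.TextMarshaler` interfaces for custom value representations. A minimal sketch of what that looks like with this library; the `color` type and the `background = "r,g,b"` format are invented here for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/BurntSushi/toml"
)

// color implements encoding.TextUnmarshaler, so the decoder hands it the raw
// string value and lets it parse its own representation.
type color struct {
	R, G, B uint8
}

func (c *color) UnmarshalText(text []byte) error {
	// Expect "r,g,b"; a real implementation would validate more carefully.
	_, err := fmt.Sscanf(strings.TrimSpace(string(text)), "%d,%d,%d", &c.R, &c.G, &c.B)
	return err
}

func main() {
	var cfg struct {
		Background color `toml:"background"`
	}
	doc := `background = "128,64,255"`
	if _, err := toml.Decode(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Background) // {R:128 G:64 B:255}
}
```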
							
								
								
									
vendor/github.com/BurntSushi/toml/doc.go (generated, vendored): 2 changed lines
							| @@ -4,7 +4,7 @@ files via reflection. There is also support for delaying decoding with | ||||
| the Primitive type, and querying the set of keys in a TOML document with the | ||||
| MetaData type. | ||||
|  | ||||
| The specification implemented: https://github.com/mojombo/toml | ||||
| The specification implemented: https://github.com/toml-lang/toml | ||||
|  | ||||
| The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify | ||||
| whether a file is a valid TOML document. It can also be used to print the | ||||
|   | ||||
							
								
								
									
vendor/github.com/BurntSushi/toml/encode.go (generated, vendored): 2 changed lines
							| @@ -241,7 +241,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { | ||||
| func (enc *Encoder) eTable(key Key, rv reflect.Value) { | ||||
| 	panicIfInvalidKey(key) | ||||
| 	if len(key) == 1 { | ||||
| 		// Output an extra new line between top-level tables. | ||||
| 		// Output an extra newline between top-level tables. | ||||
| 		// (The newline isn't written if nothing else has been written though.) | ||||
| 		enc.newline() | ||||
| 	} | ||||
|   | ||||
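The encode.go change only touches a comment, but the behavior it documents is easy to see: the encoder separates top-level tables with a blank line. A small hedged sketch; the struct and field names (and the socket paths) are invented for illustration:

```go
package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type listener struct {
	Addr string `toml:"addr"`
}

type config struct {
	GRPC  listener `toml:"grpc"`
	Debug listener `toml:"debug"`
}

func main() {
	cfg := config{
		GRPC:  listener{Addr: "/run/containerd/containerd.sock"},
		Debug: listener{Addr: "/run/containerd/debug.sock"},
	}
	// Each struct field becomes a top-level table; eTable writes the extra
	// newline, so [grpc] and [debug] come out separated by a blank line.
	if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		panic(err)
	}
}
```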
							
								
								
									
vendor/github.com/BurntSushi/toml/lex.go (generated, vendored): 261 changed lines
							| @@ -30,24 +30,28 @@ const ( | ||||
| 	itemArrayTableEnd | ||||
| 	itemKeyStart | ||||
| 	itemCommentStart | ||||
| 	itemInlineTableStart | ||||
| 	itemInlineTableEnd | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	eof             = 0 | ||||
| 	tableStart      = '[' | ||||
| 	tableEnd        = ']' | ||||
| 	arrayTableStart = '[' | ||||
| 	arrayTableEnd   = ']' | ||||
| 	tableSep        = '.' | ||||
| 	keySep          = '=' | ||||
| 	arrayStart      = '[' | ||||
| 	arrayEnd        = ']' | ||||
| 	arrayValTerm    = ',' | ||||
| 	commentStart    = '#' | ||||
| 	stringStart     = '"' | ||||
| 	stringEnd       = '"' | ||||
| 	rawStringStart  = '\'' | ||||
| 	rawStringEnd    = '\'' | ||||
| 	eof              = 0 | ||||
| 	comma            = ',' | ||||
| 	tableStart       = '[' | ||||
| 	tableEnd         = ']' | ||||
| 	arrayTableStart  = '[' | ||||
| 	arrayTableEnd    = ']' | ||||
| 	tableSep         = '.' | ||||
| 	keySep           = '=' | ||||
| 	arrayStart       = '[' | ||||
| 	arrayEnd         = ']' | ||||
| 	commentStart     = '#' | ||||
| 	stringStart      = '"' | ||||
| 	stringEnd        = '"' | ||||
| 	rawStringStart   = '\'' | ||||
| 	rawStringEnd     = '\'' | ||||
| 	inlineTableStart = '{' | ||||
| 	inlineTableEnd   = '}' | ||||
| ) | ||||
|  | ||||
| type stateFn func(lx *lexer) stateFn | ||||
| @@ -56,11 +60,18 @@ type lexer struct { | ||||
| 	input string | ||||
| 	start int | ||||
| 	pos   int | ||||
| 	width int | ||||
| 	line  int | ||||
| 	state stateFn | ||||
| 	items chan item | ||||
|  | ||||
| 	// Allow for backing up up to three runes. | ||||
| 	// This is necessary because TOML contains 3-rune tokens (""" and '''). | ||||
| 	prevWidths [3]int | ||||
| 	nprev      int // how many of prevWidths are in use | ||||
| 	// If we emit an eof, we can still back up, but it is not OK to call | ||||
| 	// next again. | ||||
| 	atEOF bool | ||||
|  | ||||
| 	// A stack of state functions used to maintain context. | ||||
| 	// The idea is to reuse parts of the state machine in various places. | ||||
| 	// For example, values can appear at the top level or within arbitrarily | ||||
| @@ -88,7 +99,7 @@ func (lx *lexer) nextItem() item { | ||||
|  | ||||
| func lex(input string) *lexer { | ||||
| 	lx := &lexer{ | ||||
| 		input: input + "\n", | ||||
| 		input: input, | ||||
| 		state: lexTop, | ||||
| 		line:  1, | ||||
| 		items: make(chan item, 10), | ||||
| @@ -103,7 +114,7 @@ func (lx *lexer) push(state stateFn) { | ||||
|  | ||||
| func (lx *lexer) pop() stateFn { | ||||
| 	if len(lx.stack) == 0 { | ||||
| 		return lx.errorf("BUG in lexer: no states to pop.") | ||||
| 		return lx.errorf("BUG in lexer: no states to pop") | ||||
| 	} | ||||
| 	last := lx.stack[len(lx.stack)-1] | ||||
| 	lx.stack = lx.stack[0 : len(lx.stack)-1] | ||||
| @@ -125,16 +136,25 @@ func (lx *lexer) emitTrim(typ itemType) { | ||||
| } | ||||
|  | ||||
| func (lx *lexer) next() (r rune) { | ||||
| 	if lx.atEOF { | ||||
| 		panic("next called after EOF") | ||||
| 	} | ||||
| 	if lx.pos >= len(lx.input) { | ||||
| 		lx.width = 0 | ||||
| 		lx.atEOF = true | ||||
| 		return eof | ||||
| 	} | ||||
|  | ||||
| 	if lx.input[lx.pos] == '\n' { | ||||
| 		lx.line++ | ||||
| 	} | ||||
| 	r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) | ||||
| 	lx.pos += lx.width | ||||
| 	lx.prevWidths[2] = lx.prevWidths[1] | ||||
| 	lx.prevWidths[1] = lx.prevWidths[0] | ||||
| 	if lx.nprev < 3 { | ||||
| 		lx.nprev++ | ||||
| 	} | ||||
| 	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) | ||||
| 	lx.prevWidths[0] = w | ||||
| 	lx.pos += w | ||||
| 	return r | ||||
| } | ||||
|  | ||||
| @@ -143,9 +163,20 @@ func (lx *lexer) ignore() { | ||||
| 	lx.start = lx.pos | ||||
| } | ||||
|  | ||||
| // backup steps back one rune. Can be called only once per call of next. | ||||
| // backup steps back one rune. Can be called only twice between calls to next. | ||||
| func (lx *lexer) backup() { | ||||
| 	lx.pos -= lx.width | ||||
| 	if lx.atEOF { | ||||
| 		lx.atEOF = false | ||||
| 		return | ||||
| 	} | ||||
| 	if lx.nprev < 1 { | ||||
| 		panic("backed up too far") | ||||
| 	} | ||||
| 	w := lx.prevWidths[0] | ||||
| 	lx.prevWidths[0] = lx.prevWidths[1] | ||||
| 	lx.prevWidths[1] = lx.prevWidths[2] | ||||
| 	lx.nprev-- | ||||
| 	lx.pos -= w | ||||
| 	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { | ||||
| 		lx.line-- | ||||
| 	} | ||||
| @@ -182,7 +213,7 @@ func (lx *lexer) skip(pred func(rune) bool) { | ||||
|  | ||||
| // errorf stops all lexing by emitting an error and returning `nil`. | ||||
| // Note that any value that is a character is escaped if it's a special | ||||
| // character (new lines, tabs, etc.). | ||||
| // character (newlines, tabs, etc.). | ||||
| func (lx *lexer) errorf(format string, values ...interface{}) stateFn { | ||||
| 	lx.items <- item{ | ||||
| 		itemError, | ||||
| @@ -198,7 +229,6 @@ func lexTop(lx *lexer) stateFn { | ||||
| 	if isWhitespace(r) || isNL(r) { | ||||
| 		return lexSkip(lx, lexTop) | ||||
| 	} | ||||
|  | ||||
| 	switch r { | ||||
| 	case commentStart: | ||||
| 		lx.push(lexTop) | ||||
| @@ -207,7 +237,7 @@ func lexTop(lx *lexer) stateFn { | ||||
| 		return lexTableStart | ||||
| 	case eof: | ||||
| 		if lx.pos > lx.start { | ||||
| 			return lx.errorf("Unexpected EOF.") | ||||
| 			return lx.errorf("unexpected EOF") | ||||
| 		} | ||||
| 		lx.emit(itemEOF) | ||||
| 		return nil | ||||
| @@ -222,12 +252,12 @@ func lexTop(lx *lexer) stateFn { | ||||
|  | ||||
| // lexTopEnd is entered whenever a top-level item has been consumed. (A value | ||||
| // or a table.) It must see only whitespace, and will turn back to lexTop | ||||
| // upon a new line. If it sees EOF, it will quit the lexer successfully. | ||||
| // upon a newline. If it sees EOF, it will quit the lexer successfully. | ||||
| func lexTopEnd(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case r == commentStart: | ||||
| 		// a comment will read to a new line for us. | ||||
| 		// a comment will read to a newline for us. | ||||
| 		lx.push(lexTop) | ||||
| 		return lexCommentStart | ||||
| 	case isWhitespace(r): | ||||
| @@ -236,11 +266,11 @@ func lexTopEnd(lx *lexer) stateFn { | ||||
| 		lx.ignore() | ||||
| 		return lexTop | ||||
| 	case r == eof: | ||||
| 		lx.ignore() | ||||
| 		return lexTop | ||||
| 		lx.emit(itemEOF) | ||||
| 		return nil | ||||
| 	} | ||||
| 	return lx.errorf("Expected a top-level item to end with a new line, "+ | ||||
| 		"comment or EOF, but got %q instead.", r) | ||||
| 	return lx.errorf("expected a top-level item to end with a newline, "+ | ||||
| 		"comment, or EOF, but got %q instead", r) | ||||
| } | ||||
|  | ||||
| // lexTable lexes the beginning of a table. Namely, it makes sure that | ||||
| @@ -267,8 +297,8 @@ func lexTableEnd(lx *lexer) stateFn { | ||||
|  | ||||
| func lexArrayTableEnd(lx *lexer) stateFn { | ||||
| 	if r := lx.next(); r != arrayTableEnd { | ||||
| 		return lx.errorf("Expected end of table array name delimiter %q, "+ | ||||
| 			"but got %q instead.", arrayTableEnd, r) | ||||
| 		return lx.errorf("expected end of table array name delimiter %q, "+ | ||||
| 			"but got %q instead", arrayTableEnd, r) | ||||
| 	} | ||||
| 	lx.emit(itemArrayTableEnd) | ||||
| 	return lexTopEnd | ||||
| @@ -278,11 +308,11 @@ func lexTableNameStart(lx *lexer) stateFn { | ||||
| 	lx.skip(isWhitespace) | ||||
| 	switch r := lx.peek(); { | ||||
| 	case r == tableEnd || r == eof: | ||||
| 		return lx.errorf("Unexpected end of table name. (Table names cannot " + | ||||
| 			"be empty.)") | ||||
| 		return lx.errorf("unexpected end of table name " + | ||||
| 			"(table names cannot be empty)") | ||||
| 	case r == tableSep: | ||||
| 		return lx.errorf("Unexpected table separator. (Table names cannot " + | ||||
| 			"be empty.)") | ||||
| 		return lx.errorf("unexpected table separator " + | ||||
| 			"(table names cannot be empty)") | ||||
| 	case r == stringStart || r == rawStringStart: | ||||
| 		lx.ignore() | ||||
| 		lx.push(lexTableNameEnd) | ||||
| @@ -317,8 +347,8 @@ func lexTableNameEnd(lx *lexer) stateFn { | ||||
| 	case r == tableEnd: | ||||
| 		return lx.pop() | ||||
| 	default: | ||||
| 		return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ | ||||
| 			"instead.", r) | ||||
| 		return lx.errorf("expected '.' or ']' to end table name, "+ | ||||
| 			"but got %q instead", r) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| @@ -328,7 +358,7 @@ func lexKeyStart(lx *lexer) stateFn { | ||||
| 	r := lx.peek() | ||||
| 	switch { | ||||
| 	case r == keySep: | ||||
| 		return lx.errorf("Unexpected key separator %q.", keySep) | ||||
| 		return lx.errorf("unexpected key separator %q", keySep) | ||||
| 	case isWhitespace(r) || isNL(r): | ||||
| 		lx.next() | ||||
| 		return lexSkip(lx, lexKeyStart) | ||||
| @@ -359,7 +389,7 @@ func lexBareKey(lx *lexer) stateFn { | ||||
| 		lx.emit(itemText) | ||||
| 		return lexKeyEnd | ||||
| 	default: | ||||
| 		return lx.errorf("Bare keys cannot contain %q.", r) | ||||
| 		return lx.errorf("bare keys cannot contain %q", r) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| @@ -372,7 +402,7 @@ func lexKeyEnd(lx *lexer) stateFn { | ||||
| 	case isWhitespace(r): | ||||
| 		return lexSkip(lx, lexKeyEnd) | ||||
| 	default: | ||||
| 		return lx.errorf("Expected key separator %q, but got %q instead.", | ||||
| 		return lx.errorf("expected key separator %q, but got %q instead", | ||||
| 			keySep, r) | ||||
| 	} | ||||
| } | ||||
| @@ -381,9 +411,8 @@ func lexKeyEnd(lx *lexer) stateFn { | ||||
| // lexValue will ignore whitespace. | ||||
| // After a value is lexed, the last state on the next is popped and returned. | ||||
| func lexValue(lx *lexer) stateFn { | ||||
| 	// We allow whitespace to precede a value, but NOT new lines. | ||||
| 	// In array syntax, the array states are responsible for ignoring new | ||||
| 	// lines. | ||||
| 	// We allow whitespace to precede a value, but NOT newlines. | ||||
| 	// In array syntax, the array states are responsible for ignoring newlines. | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case isWhitespace(r): | ||||
| @@ -397,6 +426,10 @@ func lexValue(lx *lexer) stateFn { | ||||
| 		lx.ignore() | ||||
| 		lx.emit(itemArray) | ||||
| 		return lexArrayValue | ||||
| 	case inlineTableStart: | ||||
| 		lx.ignore() | ||||
| 		lx.emit(itemInlineTableStart) | ||||
| 		return lexInlineTableValue | ||||
| 	case stringStart: | ||||
| 		if lx.accept(stringStart) { | ||||
| 			if lx.accept(stringStart) { | ||||
| @@ -420,7 +453,7 @@ func lexValue(lx *lexer) stateFn { | ||||
| 	case '+', '-': | ||||
| 		return lexNumberStart | ||||
| 	case '.': // special error case, be kind to users | ||||
| 		return lx.errorf("Floats must start with a digit, not '.'.") | ||||
| 		return lx.errorf("floats must start with a digit, not '.'") | ||||
| 	} | ||||
| 	if unicode.IsLetter(r) { | ||||
| 		// Be permissive here; lexBool will give a nice error if the | ||||
| @@ -430,11 +463,11 @@ func lexValue(lx *lexer) stateFn { | ||||
| 		lx.backup() | ||||
| 		return lexBool | ||||
| 	} | ||||
| 	return lx.errorf("Expected value but found %q instead.", r) | ||||
| 	return lx.errorf("expected value but found %q instead", r) | ||||
| } | ||||
|  | ||||
| // lexArrayValue consumes one value in an array. It assumes that '[' or ',' | ||||
| // have already been consumed. All whitespace and new lines are ignored. | ||||
| // have already been consumed. All whitespace and newlines are ignored. | ||||
| func lexArrayValue(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| @@ -443,10 +476,11 @@ func lexArrayValue(lx *lexer) stateFn { | ||||
| 	case r == commentStart: | ||||
| 		lx.push(lexArrayValue) | ||||
| 		return lexCommentStart | ||||
| 	case r == arrayValTerm: | ||||
| 		return lx.errorf("Unexpected array value terminator %q.", | ||||
| 			arrayValTerm) | ||||
| 	case r == comma: | ||||
| 		return lx.errorf("unexpected comma") | ||||
| 	case r == arrayEnd: | ||||
| 		// NOTE(caleb): The spec isn't clear about whether you can have | ||||
| 		// a trailing comma or not, so we'll allow it. | ||||
| 		return lexArrayEnd | ||||
| 	} | ||||
|  | ||||
| @@ -455,8 +489,9 @@ func lexArrayValue(lx *lexer) stateFn { | ||||
| 	return lexValue | ||||
| } | ||||
|  | ||||
| // lexArrayValueEnd consumes the cruft between values of an array. Namely, | ||||
| // it ignores whitespace and expects either a ',' or a ']'. | ||||
| // lexArrayValueEnd consumes everything between the end of an array value and | ||||
| // the next value (or the end of the array): it ignores whitespace and newlines | ||||
| // and expects either a ',' or a ']'. | ||||
| func lexArrayValueEnd(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| @@ -465,31 +500,88 @@ func lexArrayValueEnd(lx *lexer) stateFn { | ||||
| 	case r == commentStart: | ||||
| 		lx.push(lexArrayValueEnd) | ||||
| 		return lexCommentStart | ||||
| 	case r == arrayValTerm: | ||||
| 	case r == comma: | ||||
| 		lx.ignore() | ||||
| 		return lexArrayValue // move on to the next value | ||||
| 	case r == arrayEnd: | ||||
| 		return lexArrayEnd | ||||
| 	} | ||||
| 	return lx.errorf("Expected an array value terminator %q or an array "+ | ||||
| 		"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) | ||||
| 	return lx.errorf( | ||||
| 		"expected a comma or array terminator %q, but got %q instead", | ||||
| 		arrayEnd, r, | ||||
| 	) | ||||
| } | ||||
|  | ||||
| // lexArrayEnd finishes the lexing of an array. It assumes that a ']' has | ||||
| // just been consumed. | ||||
| // lexArrayEnd finishes the lexing of an array. | ||||
| // It assumes that a ']' has just been consumed. | ||||
| func lexArrayEnd(lx *lexer) stateFn { | ||||
| 	lx.ignore() | ||||
| 	lx.emit(itemArrayEnd) | ||||
| 	return lx.pop() | ||||
| } | ||||
|  | ||||
| // lexInlineTableValue consumes one key/value pair in an inline table. | ||||
| // It assumes that '{' or ',' have already been consumed. Whitespace is ignored. | ||||
| func lexInlineTableValue(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case isWhitespace(r): | ||||
| 		return lexSkip(lx, lexInlineTableValue) | ||||
| 	case isNL(r): | ||||
| 		return lx.errorf("newlines not allowed within inline tables") | ||||
| 	case r == commentStart: | ||||
| 		lx.push(lexInlineTableValue) | ||||
| 		return lexCommentStart | ||||
| 	case r == comma: | ||||
| 		return lx.errorf("unexpected comma") | ||||
| 	case r == inlineTableEnd: | ||||
| 		return lexInlineTableEnd | ||||
| 	} | ||||
| 	lx.backup() | ||||
| 	lx.push(lexInlineTableValueEnd) | ||||
| 	return lexKeyStart | ||||
| } | ||||
|  | ||||
| // lexInlineTableValueEnd consumes everything between the end of an inline table | ||||
| // key/value pair and the next pair (or the end of the table): | ||||
| // it ignores whitespace and expects either a ',' or a '}'. | ||||
| func lexInlineTableValueEnd(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case isWhitespace(r): | ||||
| 		return lexSkip(lx, lexInlineTableValueEnd) | ||||
| 	case isNL(r): | ||||
| 		return lx.errorf("newlines not allowed within inline tables") | ||||
| 	case r == commentStart: | ||||
| 		lx.push(lexInlineTableValueEnd) | ||||
| 		return lexCommentStart | ||||
| 	case r == comma: | ||||
| 		lx.ignore() | ||||
| 		return lexInlineTableValue | ||||
| 	case r == inlineTableEnd: | ||||
| 		return lexInlineTableEnd | ||||
| 	} | ||||
| 	return lx.errorf("expected a comma or an inline table terminator %q, "+ | ||||
| 		"but got %q instead", inlineTableEnd, r) | ||||
| } | ||||
|  | ||||
| // lexInlineTableEnd finishes the lexing of an inline table. | ||||
| // It assumes that a '}' has just been consumed. | ||||
| func lexInlineTableEnd(lx *lexer) stateFn { | ||||
| 	lx.ignore() | ||||
| 	lx.emit(itemInlineTableEnd) | ||||
| 	return lx.pop() | ||||
| } | ||||
|  | ||||
| // lexString consumes the inner contents of a string. It assumes that the | ||||
| // beginning '"' has already been consumed and ignored. | ||||
| func lexString(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case r == eof: | ||||
| 		return lx.errorf("unexpected EOF") | ||||
| 	case isNL(r): | ||||
| 		return lx.errorf("Strings cannot contain new lines.") | ||||
| 		return lx.errorf("strings cannot contain newlines") | ||||
| 	case r == '\\': | ||||
| 		lx.push(lexString) | ||||
| 		return lexStringEscape | ||||
| @@ -506,11 +598,12 @@ func lexString(lx *lexer) stateFn { | ||||
| // lexMultilineString consumes the inner contents of a string. It assumes that | ||||
| // the beginning '"""' has already been consumed and ignored. | ||||
| func lexMultilineString(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case r == '\\': | ||||
| 	switch lx.next() { | ||||
| 	case eof: | ||||
| 		return lx.errorf("unexpected EOF") | ||||
| 	case '\\': | ||||
| 		return lexMultilineStringEscape | ||||
| 	case r == stringEnd: | ||||
| 	case stringEnd: | ||||
| 		if lx.accept(stringEnd) { | ||||
| 			if lx.accept(stringEnd) { | ||||
| 				lx.backup() | ||||
| @@ -534,8 +627,10 @@ func lexMultilineString(lx *lexer) stateFn { | ||||
| func lexRawString(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case r == eof: | ||||
| 		return lx.errorf("unexpected EOF") | ||||
| 	case isNL(r): | ||||
| 		return lx.errorf("Strings cannot contain new lines.") | ||||
| 		return lx.errorf("strings cannot contain newlines") | ||||
| 	case r == rawStringEnd: | ||||
| 		lx.backup() | ||||
| 		lx.emit(itemRawString) | ||||
| @@ -547,12 +642,13 @@ func lexRawString(lx *lexer) stateFn { | ||||
| } | ||||
|  | ||||
| // lexMultilineRawString consumes a raw string. Nothing can be escaped in such | ||||
| // a string. It assumes that the beginning "'" has already been consumed and | ||||
| // a string. It assumes that the beginning "'''" has already been consumed and | ||||
| // ignored. | ||||
| func lexMultilineRawString(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case r == rawStringEnd: | ||||
| 	switch lx.next() { | ||||
| 	case eof: | ||||
| 		return lx.errorf("unexpected EOF") | ||||
| 	case rawStringEnd: | ||||
| 		if lx.accept(rawStringEnd) { | ||||
| 			if lx.accept(rawStringEnd) { | ||||
| 				lx.backup() | ||||
| @@ -605,10 +701,9 @@ func lexStringEscape(lx *lexer) stateFn { | ||||
| 	case 'U': | ||||
| 		return lexLongUnicodeEscape | ||||
| 	} | ||||
| 	return lx.errorf("Invalid escape character %q. Only the following "+ | ||||
| 	return lx.errorf("invalid escape character %q; only the following "+ | ||||
| 		"escape characters are allowed: "+ | ||||
| 		"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ | ||||
| 		"\\uXXXX and \\UXXXXXXXX.", r) | ||||
| 		`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) | ||||
| } | ||||
|  | ||||
| func lexShortUnicodeEscape(lx *lexer) stateFn { | ||||
| @@ -616,8 +711,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn { | ||||
| 	for i := 0; i < 4; i++ { | ||||
| 		r = lx.next() | ||||
| 		if !isHexadecimal(r) { | ||||
| 			return lx.errorf("Expected four hexadecimal digits after '\\u', "+ | ||||
| 				"but got '%s' instead.", lx.current()) | ||||
| 			return lx.errorf(`expected four hexadecimal digits after '\u', `+ | ||||
| 				"but got %q instead", lx.current()) | ||||
| 		} | ||||
| 	} | ||||
| 	return lx.pop() | ||||
| @@ -628,8 +723,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { | ||||
| 	for i := 0; i < 8; i++ { | ||||
| 		r = lx.next() | ||||
| 		if !isHexadecimal(r) { | ||||
| 			return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ | ||||
| 				"but got '%s' instead.", lx.current()) | ||||
| 			return lx.errorf(`expected eight hexadecimal digits after '\U', `+ | ||||
| 				"but got %q instead", lx.current()) | ||||
| 		} | ||||
| 	} | ||||
| 	return lx.pop() | ||||
| @@ -647,9 +742,9 @@ func lexNumberOrDateStart(lx *lexer) stateFn { | ||||
| 	case 'e', 'E': | ||||
| 		return lexFloat | ||||
| 	case '.': | ||||
| 		return lx.errorf("Floats must start with a digit, not '.'.") | ||||
| 		return lx.errorf("floats must start with a digit, not '.'") | ||||
| 	} | ||||
| 	return lx.errorf("Expected a digit but got %q.", r) | ||||
| 	return lx.errorf("expected a digit but got %q", r) | ||||
| } | ||||
|  | ||||
| // lexNumberOrDate consumes either an integer, float or datetime. | ||||
| @@ -697,9 +792,9 @@ func lexNumberStart(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	if !isDigit(r) { | ||||
| 		if r == '.' { | ||||
| 			return lx.errorf("Floats must start with a digit, not '.'.") | ||||
| 			return lx.errorf("floats must start with a digit, not '.'") | ||||
| 		} | ||||
| 		return lx.errorf("Expected a digit but got %q.", r) | ||||
| 		return lx.errorf("expected a digit but got %q", r) | ||||
| 	} | ||||
| 	return lexNumber | ||||
| } | ||||
| @@ -745,7 +840,7 @@ func lexBool(lx *lexer) stateFn { | ||||
| 	var rs []rune | ||||
| 	for { | ||||
| 		r := lx.next() | ||||
| 		if r == eof || isWhitespace(r) || isNL(r) { | ||||
| 		if !unicode.IsLetter(r) { | ||||
| 			lx.backup() | ||||
| 			break | ||||
| 		} | ||||
| @@ -757,7 +852,7 @@ func lexBool(lx *lexer) stateFn { | ||||
| 		lx.emit(itemBool) | ||||
| 		return lx.pop() | ||||
| 	} | ||||
| 	return lx.errorf("Expected value but found %q instead.", s) | ||||
| 	return lx.errorf("expected value but found %q instead", s) | ||||
| } | ||||
|  | ||||
| // lexCommentStart begins the lexing of a comment. It will emit | ||||
| @@ -769,7 +864,7 @@ func lexCommentStart(lx *lexer) stateFn { | ||||
| } | ||||
|  | ||||
| // lexComment lexes an entire comment. It assumes that '#' has been consumed. | ||||
| // It will consume *up to* the first new line character, and pass control | ||||
| // It will consume *up to* the first newline character, and pass control | ||||
| // back to the last state on the stack. | ||||
| func lexComment(lx *lexer) stateFn { | ||||
| 	r := lx.peek() | ||||
|   | ||||
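Three things change in the lexer above: error messages are lowercased and de-punctuated, new states handle `{ ... }` inline tables, and the single `width` field is replaced by a three-entry `prevWidths` history plus `nprev`, so the lexer can step back across multi-rune delimiters such as `"""` and `'''` without re-decoding UTF-8. A standalone sketch of that backup bookkeeping, independent of the toml package (simplified, no EOF/line tracking; the names are mine, not the library's):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// runeScanner remembers the byte widths of the last three runes so backup()
// can be called repeatedly after a speculative read, without re-decoding.
type runeScanner struct {
	input      string
	pos        int
	prevWidths [3]int
	nprev      int // how many entries of prevWidths are valid
}

func (s *runeScanner) next() rune {
	if s.pos >= len(s.input) {
		return 0 // eof
	}
	r, w := utf8.DecodeRuneInString(s.input[s.pos:])
	// Shift the width history; the newest width lives at index 0.
	s.prevWidths[2], s.prevWidths[1] = s.prevWidths[1], s.prevWidths[0]
	s.prevWidths[0] = w
	if s.nprev < 3 {
		s.nprev++
	}
	s.pos += w
	return r
}

func (s *runeScanner) backup() {
	if s.nprev < 1 {
		panic("backed up too far")
	}
	s.pos -= s.prevWidths[0]
	s.prevWidths[0], s.prevWidths[1] = s.prevWidths[1], s.prevWidths[2]
	s.nprev--
}

func main() {
	s := &runeScanner{input: `"""x`}
	fmt.Println(string(s.next()), string(s.next()), string(s.next())) // " " "
	// Not the delimiter we wanted after all? Put all three quotes back.
	s.backup()
	s.backup()
	s.backup()
	fmt.Println(string(s.next())) // back at the first quote
}
```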
							
								
								
									
vendor/github.com/BurntSushi/toml/parse.go (generated, vendored): 35 changed lines
							| @@ -269,6 +269,41 @@ func (p *parser) value(it item) (interface{}, tomlType) { | ||||
| 			types = append(types, typ) | ||||
| 		} | ||||
| 		return array, p.typeOfArray(types) | ||||
| 	case itemInlineTableStart: | ||||
| 		var ( | ||||
| 			hash         = make(map[string]interface{}) | ||||
| 			outerContext = p.context | ||||
| 			outerKey     = p.currentKey | ||||
| 		) | ||||
|  | ||||
| 		p.context = append(p.context, p.currentKey) | ||||
| 		p.currentKey = "" | ||||
| 		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { | ||||
| 			if it.typ != itemKeyStart { | ||||
| 				p.bug("Expected key start but instead found %q, around line %d", | ||||
| 					it.val, p.approxLine) | ||||
| 			} | ||||
| 			if it.typ == itemCommentStart { | ||||
| 				p.expect(itemText) | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			// retrieve key | ||||
| 			k := p.next() | ||||
| 			p.approxLine = k.line | ||||
| 			kname := p.keyString(k) | ||||
|  | ||||
| 			// retrieve value | ||||
| 			p.currentKey = kname | ||||
| 			val, typ := p.value(p.next()) | ||||
| 			// make sure we keep metadata up to date | ||||
| 			p.setType(kname, typ) | ||||
| 			p.ordered = append(p.ordered, p.context.add(p.currentKey)) | ||||
| 			hash[kname] = val | ||||
| 		} | ||||
| 		p.context = outerContext | ||||
| 		p.currentKey = outerKey | ||||
| 		return hash, tomlHash | ||||
| 	} | ||||
| 	p.bug("Unexpected value type: %s", it.typ) | ||||
| 	panic("unreachable") | ||||
|   | ||||
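The parse.go hunk wires the new `itemInlineTableStart`/`itemInlineTableEnd` tokens into `value()`, collecting the key/value pairs of a `{ ... }` inline table into a hash while keeping `p.context`, `p.currentKey`, and the type metadata consistent. End to end, documents like the one below should decode after this bump; a hedged sketch in which the struct layout, keys, and path are illustrative only, not containerd's actual config schema:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type address struct {
	Path string `toml:"path"`
	UID  int    `toml:"uid"`
	GID  int    `toml:"gid"`
}

type config struct {
	GRPC struct {
		Address address `toml:"address"`
	} `toml:"grpc"`
}

func main() {
	// An inline table used as a value; the lexer and parser changes in this
	// PR are what add support for the { ... } form.
	doc := `
[grpc]
address = { path = "/run/containerd/containerd.sock", uid = 0, gid = 0 }
`
	var cfg config
	if _, err := toml.Decode(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.GRPC.Address)
}
```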