*: Bump version of vmware/govmomi

Bumping version to include changes that
better handle TLS errors. The bump is
necessary to prepare for when the version
of Go is bumped to 1.20.

Signed-off-by: Madhav Jivrajani <madhav.jiv@gmail.com>
Madhav Jivrajani
2023-01-02 20:56:02 +05:30
parent 45df8f0bb3
commit 8b064fa4be
263 changed files with 32336 additions and 4373 deletions


@@ -7,8 +7,8 @@
package xml
// References:
// Annotated XML spec: http://www.xml.com/axml/testaxml.htm
// XML name spaces: http://www.w3.org/TR/REC-xml-names/
// Annotated XML spec: https://www.xml.com/axml/testaxml.htm
// XML name spaces: https://www.w3.org/TR/REC-xml-names/
// TODO(rsc):
// Test error handling.
@@ -61,6 +61,7 @@ type StartElement struct {
Attr []Attr
}
// Copy creates a new copy of StartElement.
func (e StartElement) Copy() StartElement {
attrs := make([]Attr, len(e.Attr))
copy(attrs, e.Attr)
@@ -89,12 +90,14 @@ func makeCopy(b []byte) []byte {
return b1
}
// Copy creates a new copy of CharData.
func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
// A Comment represents an XML comment of the form <!--comment-->.
// The bytes do not include the <!-- and --> comment markers.
type Comment []byte
// Copy creates a new copy of Comment.
func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
// A ProcInst represents an XML processing instruction of the form <?target inst?>
@@ -103,6 +106,7 @@ type ProcInst struct {
Inst []byte
}
// Copy creates a new copy of ProcInst.
func (p ProcInst) Copy() ProcInst {
p.Inst = makeCopy(p.Inst)
return p
@@ -112,6 +116,7 @@ func (p ProcInst) Copy() ProcInst {
// The bytes do not include the <! and > markers.
type Directive []byte
// Copy creates a new copy of Directive.
func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
// CopyToken returns a copy of a Token.
@@ -131,6 +136,23 @@ func CopyToken(t Token) Token {
return t
}
// A TokenReader is anything that can decode a stream of XML tokens, including a
// Decoder.
//
// When Token encounters an error or end-of-file condition after successfully
// reading a token, it returns the token. It may return the (non-nil) error from
// the same call or return the error (and a nil token) from a subsequent call.
// An instance of this general case is that a TokenReader returning a non-nil
// token at the end of the token stream may return either io.EOF or a nil error.
// The next Read should return nil, io.EOF.
//
// Implementations of Token are discouraged from returning a nil token with a
// nil error. Callers should treat a return of nil, nil as indicating that
// nothing happened; in particular it does not indicate EOF.
type TokenReader interface {
Token() (Token, error)
}
// A Decoder represents an XML parser reading a particular input stream.
// The parser assumes that its input is encoded in UTF-8.
type Decoder struct {
@@ -146,9 +168,9 @@ type Decoder struct {
//
// Setting:
//
// d.Strict = false;
// d.AutoClose = HTMLAutoClose;
// d.Entity = HTMLEntity
// d.Strict = false
// d.AutoClose = xml.HTMLAutoClose
// d.Entity = xml.HTMLEntity
//
// creates a parser that can handle typical HTML.
//
@@ -177,7 +199,7 @@ type Decoder struct {
// charset-conversion readers, converting from the provided
// non-UTF-8 charset into UTF-8. If CharsetReader is nil or
// returns an error, parsing stops with an error. One of the
// the CharsetReader's result values must be non-nil.
// CharsetReader's result values must be non-nil.
CharsetReader func(charset string, input io.Reader) (io.Reader, error)
// DefaultSpace sets the default name space used for unadorned tags,
@@ -189,6 +211,7 @@ type Decoder struct {
TypeFunc func(string) (reflect.Type, bool)
r io.ByteReader
t TokenReader
buf bytes.Buffer
saved *bytes.Buffer
stk *stack
@@ -200,6 +223,7 @@ type Decoder struct {
ns map[string]string
err error
line int
offset int64
unmarshalDepth int
}
@@ -217,12 +241,28 @@ func NewDecoder(r io.Reader) *Decoder {
return d
}
// NewTokenDecoder creates a new XML parser using an underlying token stream.
func NewTokenDecoder(t TokenReader) *Decoder {
// Is it already a Decoder?
if d, ok := t.(*Decoder); ok {
return d
}
d := &Decoder{
ns: make(map[string]string),
t: t,
nextByte: -1,
line: 1,
Strict: true,
}
return d
}
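
To make the TokenReader/NewTokenDecoder pairing above concrete, here is a rough sketch against the standard encoding/xml; the replayTokens type and its contents are invented for illustration:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
)

// replayTokens is a hypothetical TokenReader that replays a fixed
// slice of tokens and then reports io.EOF.
type replayTokens struct{ toks []xml.Token }

func (r *replayTokens) Token() (xml.Token, error) {
	if len(r.toks) == 0 {
		return nil, io.EOF // nil token with io.EOF marks the end of the stream
	}
	t := r.toks[0]
	r.toks = r.toks[1:]
	return t, nil
}

func main() {
	d := xml.NewTokenDecoder(&replayTokens{toks: []xml.Token{
		xml.StartElement{Name: xml.Name{Local: "v"}},
		xml.CharData("hello"),
		xml.EndElement{Name: xml.Name{Local: "v"}},
	}})
	var s string
	if err := d.Decode(&s); err != nil {
		panic(err)
	}
	fmt.Println(s) // hello
}
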
// Token returns the next XML token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Slices of bytes in the returned token data refer to the
// parser's internal buffer and remain valid only until the next
// call to Token. To acquire a copy of the bytes, call CopyToken
// or the token's Copy method.
//
// Token expands self-closing elements such as <br/>
@@ -230,25 +270,33 @@ func NewDecoder(r io.Reader) *Decoder {
//
// Token guarantees that the StartElement and EndElement
// tokens it returns are properly nested and matched:
// if Token encounters an unexpected end element,
// if Token encounters an unexpected end element
// or EOF before all expected end elements,
// it will return an error.
//
// Token implements XML name spaces as described by
// http://www.w3.org/TR/REC-xml-names/. Each of the
// https://www.w3.org/TR/REC-xml-names/. Each of the
// Name structures contained in the Token has the Space
// set to the URL identifying its name space when known.
// If Token encounters an unrecognized name space prefix,
// it uses the prefix as the Space rather than report an error.
func (d *Decoder) Token() (t Token, err error) {
func (d *Decoder) Token() (Token, error) {
var t Token
var err error
if d.stk != nil && d.stk.kind == stkEOF {
err = io.EOF
return
return nil, io.EOF
}
if d.nextToken != nil {
t = d.nextToken
d.nextToken = nil
} else if t, err = d.rawToken(); err != nil {
return
switch {
case err == io.EOF && d.t != nil:
err = nil
case err == io.EOF && d.stk != nil && d.stk.kind != stkEOF:
err = d.syntaxError("unexpected EOF")
}
return t, err
}
if !d.Strict {
@@ -264,12 +312,12 @@ func (d *Decoder) Token() (t Token, err error) {
// to the other attribute names, so process
// the translations first.
for _, a := range t1.Attr {
if a.Name.Space == "xmlns" {
if a.Name.Space == xmlnsPrefix {
v, ok := d.ns[a.Name.Local]
d.pushNs(a.Name.Local, v, ok)
d.ns[a.Name.Local] = a.Value
}
if a.Name.Space == "" && a.Name.Local == "xmlns" {
if a.Name.Space == "" && a.Name.Local == xmlnsPrefix {
// Default space for untagged names
v, ok := d.ns[""]
d.pushNs("", v, ok)
@@ -291,23 +339,27 @@ func (d *Decoder) Token() (t Token, err error) {
}
t = t1
}
return
return t, err
}
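
Because token data can alias the decoder's internal buffer (see the Token documentation above), callers that retain tokens are expected to copy them. A small sketch against the standard encoding/xml, with an invented input document:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

func main() {
	d := xml.NewDecoder(strings.NewReader(`<a><b>one</b><b>two</b></a>`))
	var kept []xml.Token
	for {
		tok, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// The token may alias the decoder's internal buffer, so copy it
		// before retaining it past the next call to Token.
		kept = append(kept, xml.CopyToken(tok))
	}
	fmt.Println(len(kept)) // 8 tokens for this input
}
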
const xmlURL = "http://www.w3.org/XML/1998/namespace"
const (
xmlURL = "http://www.w3.org/XML/1998/namespace"
xmlnsPrefix = "xmlns"
xmlPrefix = "xml"
)
// Apply name space translation to name n.
// The default name space (for Space=="")
// applies only to element names, not to attribute names.
func (d *Decoder) translate(n *Name, isElementName bool) {
switch {
case n.Space == "xmlns":
case n.Space == xmlnsPrefix:
return
case n.Space == "" && !isElementName:
return
case n.Space == "xml":
case n.Space == xmlPrefix:
n.Space = xmlURL
case n.Space == "" && n.Local == "xmlns":
case n.Space == "" && n.Local == xmlnsPrefix:
return
}
if v, ok := d.ns[n.Space]; ok {
@@ -330,7 +382,7 @@ func (d *Decoder) switchToReader(r io.Reader) {
}
// Parsing state - stack holds old name space translations
// and the current set of open elements. The translations to pop when
// ending a given tag are *below* it on the stack, which is
// more work but forced on us by XML.
type stack struct {
@@ -501,6 +553,9 @@ func (d *Decoder) RawToken() (Token, error) {
}
func (d *Decoder) rawToken() (Token, error) {
if d.t != nil {
return d.t.Token()
}
if d.err != nil {
return nil, d.err
}
@@ -552,7 +607,6 @@ func (d *Decoder) rawToken() (Token, error) {
case '?':
// <?: Processing instruction.
// TODO(rsc): Should parse the <?xml declaration to make sure the version is 1.0.
var target string
if target, ok = d.name(); !ok {
if d.err == nil {
@@ -577,8 +631,14 @@ func (d *Decoder) rawToken() (Token, error) {
data = data[0 : len(data)-2] // chop ?>
if target == "xml" {
enc := procInstEncoding(string(data))
if enc != "" && enc != "utf-8" && enc != "UTF-8" {
content := string(data)
ver := procInst("version", content)
if ver != "" && ver != "1.0" {
d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver)
return nil, d.err
}
enc := procInst("encoding", content)
if enc != "" && enc != "utf-8" && enc != "UTF-8" && !strings.EqualFold(enc, "utf-8") {
if d.CharsetReader == nil {
d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc)
return nil, d.err
@@ -619,7 +679,12 @@ func (d *Decoder) rawToken() (Token, error) {
return nil, d.err
}
d.buf.WriteByte(b)
if b0 == '-' && b1 == '-' && b == '>' {
if b0 == '-' && b1 == '-' {
if b != '>' {
d.err = d.syntaxError(
`invalid sequence "--" not allowed in comments`)
return nil, d.err
}
break
}
b0, b1 = b1, b
@@ -726,7 +791,7 @@ func (d *Decoder) rawToken() (Token, error) {
return nil, d.err
}
attr = make([]Attr, 0, 4)
attr = []Attr{}
for {
d.space()
if b, ok = d.mustgetc(); !ok {
@@ -748,14 +813,7 @@ func (d *Decoder) rawToken() (Token, error) {
}
d.ungetc(b)
n := len(attr)
if n >= cap(attr) {
nattr := make([]Attr, n, 2*cap(attr))
copy(nattr, attr)
attr = nattr
}
attr = attr[0 : n+1]
a := &attr[n]
a := Attr{}
if a.Name, ok = d.nsname(); !ok {
if d.err == nil {
d.err = d.syntaxError("expected attribute name in element")
@@ -770,10 +828,9 @@ func (d *Decoder) rawToken() (Token, error) {
if d.Strict {
d.err = d.syntaxError("attribute name without = in element")
return nil, d.err
} else {
d.ungetc(b)
a.Value = a.Name.Local
}
d.ungetc(b)
a.Value = a.Name.Local
} else {
d.space()
data := d.attrval()
@@ -782,6 +839,7 @@ func (d *Decoder) rawToken() (Token, error) {
}
a.Value = string(data)
}
attr = append(attr, a)
}
if empty {
d.needClose = true
@@ -812,7 +870,7 @@ func (d *Decoder) attrval() []byte {
if !ok {
return nil
}
// http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2
// https://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2
if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' ||
'0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' {
d.buf.WriteByte(b)
@@ -863,9 +921,17 @@ func (d *Decoder) getc() (b byte, ok bool) {
if b == '\n' {
d.line++
}
d.offset++
return b, true
}
// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token
// and the beginning of the next token.
func (d *Decoder) InputOffset() int64 {
return d.offset
}
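
The newly added InputOffset accessor reports the byte offset at the end of the most recently returned token (and the start of the next one). A minimal usage sketch against the standard encoding/xml, with a made-up document:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

func main() {
	d := xml.NewDecoder(strings.NewReader(`<root><item/></root>`))
	for {
		tok, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// Offset of the end of the token just returned.
		fmt.Printf("%T ends at byte %d\n", tok, d.InputOffset())
	}
}
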
// Return saved offset.
// If we did ungetc (nextByte >= 0), have to back up one.
func (d *Decoder) savedOffset() int {
@@ -895,9 +961,10 @@ func (d *Decoder) ungetc(b byte) {
d.line--
}
d.nextByte = int(b)
d.offset--
}
var entity = map[string]int{
var entity = map[string]rune{
"lt": '<',
"gt": '>',
"amp": '&',
@@ -992,7 +1059,7 @@ Input:
d.buf.WriteByte(';')
n, err := strconv.ParseUint(s, base, 64)
if err == nil && n <= unicode.MaxRune {
text = string(n)
text = string(rune(n))
haveText = true
}
}
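
The string(n) to string(rune(n)) change above is worth a note in the context of the Go version bump this commit prepares for: converting an integer type other than rune or byte directly to string is flagged by go vet's stringintconv check in recent Go releases, while the explicit rune conversion states the intent. A tiny standalone sketch (the code point is arbitrary):

package main

import "fmt"

func main() {
	n := uint64(0x2603) // an example code point (U+2603)
	// string(n) would compile but is flagged by go vet's stringintconv
	// check; string(rune(n)) makes the code-point conversion explicit.
	s := string(rune(n))
	fmt.Println(s, len(s)) // the rune encodes to 3 UTF-8 bytes
}
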
@@ -1002,7 +1069,6 @@ Input:
if d.err != nil {
return nil
}
ok = false
}
if b, ok = d.mustgetc(); !ok {
return nil
@@ -1075,13 +1141,13 @@ Input:
}
// Decide whether the given rune is in the XML Character Range, per
// the Char production of http://www.xml.com/axml/testaxml.htm,
// the Char production of https://www.xml.com/axml/testaxml.htm,
// Section 2.2 Characters.
func isInCharacterRange(r rune) (inrange bool) {
return r == 0x09 ||
r == 0x0A ||
r == 0x0D ||
r >= 0x20 && r <= 0xDF77 ||
r >= 0x20 && r <= 0xD7FF ||
r >= 0xE000 && r <= 0xFFFD ||
r >= 0x10000 && r <= 0x10FFFF
}
@@ -1113,12 +1179,12 @@ func (d *Decoder) name() (s string, ok bool) {
}
// Now we check the characters.
s = d.buf.String()
if !isName([]byte(s)) {
d.err = d.syntaxError("invalid XML name: " + s)
b := d.buf.Bytes()
if !isName(b) {
d.err = d.syntaxError("invalid XML name: " + string(b))
return "", false
}
return s, true
return string(b), true
}
// Read a name and append its bytes to d.buf.
@@ -1204,8 +1270,8 @@ func isNameString(s string) bool {
}
// These tables were generated by cut and paste from Appendix B of
// the XML spec at http://www.xml.com/axml/testaxml.htm
// the XML spec at https://www.xml.com/axml/testaxml.htm
// and then reformatting. First corresponds to (Letter | '_' | ':')
// and second corresponds to NameChar.
var first = &unicode.RangeTable{
@@ -1522,7 +1588,9 @@ var second = &unicode.RangeTable{
// HTMLEntity is an entity map containing translations for the
// standard HTML entity characters.
var HTMLEntity = htmlEntity
//
// See the Decoder.Strict and Decoder.Entity fields' documentation.
var HTMLEntity map[string]string = htmlEntity
var htmlEntity = map[string]string{
/*
@@ -1789,7 +1857,9 @@ var htmlEntity = map[string]string{
// HTMLAutoClose is the set of HTML elements that
// should be considered to close automatically.
var HTMLAutoClose = htmlAutoClose
//
// See the Decoder.Strict and Decoder.Entity fields' documentation.
var HTMLAutoClose []string = htmlAutoClose
var htmlAutoClose = []string{
/*
@@ -1812,20 +1882,27 @@ var htmlAutoClose = []string{
}
var (
esc_quot = []byte("&#34;") // shorter than "&quot;"
esc_apos = []byte("&#39;") // shorter than "&apos;"
esc_amp = []byte("&amp;")
esc_lt = []byte("&lt;")
esc_gt = []byte("&gt;")
esc_tab = []byte("&#x9;")
esc_nl = []byte("&#xA;")
esc_cr = []byte("&#xD;")
esc_fffd = []byte("\uFFFD") // Unicode replacement character
escQuot = []byte("&#34;") // shorter than "&quot;"
escApos = []byte("&#39;") // shorter than "&apos;"
escAmp = []byte("&amp;")
escLT = []byte("&lt;")
escGT = []byte("&gt;")
escTab = []byte("&#x9;")
escNL = []byte("&#xA;")
escCR = []byte("&#xD;")
escFFFD = []byte("\uFFFD") // Unicode replacement character
)
// EscapeText writes to w the properly escaped XML equivalent
// of the plain text data s.
func EscapeText(w io.Writer, s []byte) error {
return escapeText(w, s, true)
}
// escapeText writes to w the properly escaped XML equivalent
// of the plain text data s. If escapeNewline is true, newline
// characters will be escaped.
func escapeText(w io.Writer, s []byte, escapeNewline bool) error {
var esc []byte
last := 0
for i := 0; i < len(s); {
@@ -1833,24 +1910,27 @@ func EscapeText(w io.Writer, s []byte) error {
i += width
switch r {
case '"':
esc = esc_quot
esc = escQuot
case '\'':
esc = esc_apos
esc = escApos
case '&':
esc = esc_amp
esc = escAmp
case '<':
esc = esc_lt
esc = escLT
case '>':
esc = esc_gt
esc = escGT
case '\t':
esc = esc_tab
esc = escTab
case '\n':
esc = esc_nl
if !escapeNewline {
continue
}
esc = escNL
case '\r':
esc = esc_cr
esc = escCR
default:
if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
esc = esc_fffd
esc = escFFFD
break
}
continue
@@ -1863,10 +1943,8 @@ func EscapeText(w io.Writer, s []byte) error {
}
last = i
}
if _, err := w.Write(s[last:]); err != nil {
return err
}
return nil
_, err := w.Write(s[last:])
return err
}
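
EscapeText keeps its exported behavior and now delegates to escapeText with newline escaping enabled. Typical usage against the standard encoding/xml, with an arbitrary input string:

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	// Escapes the XML special characters plus tab, newline, and carriage return.
	if err := xml.EscapeText(&buf, []byte(`a < b & "c"`+"\n")); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // a &lt; b &amp; &#34;c&#34;&#xA;
}
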
// EscapeString writes to p the properly escaped XML equivalent
@@ -1879,24 +1957,24 @@ func (p *printer) EscapeString(s string) {
i += width
switch r {
case '"':
esc = esc_quot
esc = escQuot
case '\'':
esc = esc_apos
esc = escApos
case '&':
esc = esc_amp
esc = escAmp
case '<':
esc = esc_lt
esc = escLT
case '>':
esc = esc_gt
esc = escGT
case '\t':
esc = esc_tab
esc = escTab
case '\n':
esc = esc_nl
esc = escNL
case '\r':
esc = esc_cr
esc = escCR
default:
if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
esc = esc_fffd
esc = escFFFD
break
}
continue
@@ -1915,16 +1993,55 @@ func Escape(w io.Writer, s []byte) {
EscapeText(w, s)
}
// procInstEncoding parses the `encoding="..."` or `encoding='...'`
var (
cdataStart = []byte("<![CDATA[")
cdataEnd = []byte("]]>")
cdataEscape = []byte("]]]]><![CDATA[>")
)
// emitCDATA writes to w the CDATA-wrapped plain text data s.
// It escapes CDATA directives nested in s.
func emitCDATA(w io.Writer, s []byte) error {
if len(s) == 0 {
return nil
}
if _, err := w.Write(cdataStart); err != nil {
return err
}
for {
i := bytes.Index(s, cdataEnd)
if i >= 0 && i+len(cdataEnd) <= len(s) {
// Found a nested CDATA directive end.
if _, err := w.Write(s[:i]); err != nil {
return err
}
if _, err := w.Write(cdataEscape); err != nil {
return err
}
i += len(cdataEnd)
} else {
if _, err := w.Write(s); err != nil {
return err
}
break
}
s = s[i:]
}
_, err := w.Write(cdataEnd)
return err
}
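
emitCDATA itself is unexported; in the standard encoding/xml it backs the ",cdata" struct-tag option of the encoder, splitting any literal "]]>" in the data across two CDATA sections so the output stays well formed. Assuming the vendored marshal code matches upstream, usage looks roughly like this (the note type is invented):

package main

import (
	"encoding/xml"
	"fmt"
)

type note struct {
	XMLName xml.Name `xml:"note"`
	Body    string   `xml:",cdata"`
}

func main() {
	out, err := xml.Marshal(note{Body: "raw <markup> and a literal ]]> inside"})
	if err != nil {
		panic(err)
	}
	// The ]]> in the body is split across two CDATA sections.
	fmt.Println(string(out))
}
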
// procInst parses the `param="..."` or `param='...'`
// value out of the provided string, returning "" if not found.
func procInstEncoding(s string) string {
func procInst(param, s string) string {
// TODO: this parsing is somewhat lame and not exact.
// It works for all actual cases, though.
idx := strings.Index(s, "encoding=")
param = param + "="
idx := strings.Index(s, param)
if idx == -1 {
return ""
}
v := s[idx+len("encoding="):]
v := s[idx+len(param):]
if v == "" {
return ""
}