Update godeps for etcd 3.0.4

vendor/github.com/russross/blackfriday/.travis.yml | 2 changed lines (generated, vendored)
@@ -7,6 +7,8 @@ language: go
go:
- 1.2
- 1.3
- 1.4
- 1.5

install:
- go get -d -t -v ./...

vendor/github.com/russross/blackfriday/README.md | 29 changed lines (generated, vendored)
@@ -10,7 +10,7 @@ punctuation substitutions, etc.), and it is safe for all utf-8
HTML output is currently supported, along with Smartypants
extensions. An experimental LaTeX output engine is also included.

It started as a translation from C of [upskirt][3].
It started as a translation from C of [Sundown][3].

Installation
@@ -97,7 +97,7 @@ dependencies and library versions.
Features
--------

All features of upskirt are supported, including:
All features of Sundown are supported, including:

* **Compatibility**. The Markdown v1.0.3 test suite passes with
the `--tidy` option. Without `--tidy`, the differences are
@@ -169,6 +169,25 @@ implements the following extensions:
You can use 3 or more backticks to mark the beginning of the
block, and the same number to mark the end of the block.

* **Definition lists**. A simple definition list is made of a single-line
term followed by a colon and the definition for that term.

Cat
: Fluffy animal everyone likes

Internet
: Vector of transmission for pictures of cats

Terms must be separated from the previous definition by a blank line.

* **Footnotes**. A marker in the text that will become a superscript number;
a footnote definition that will be placed in a list of footnotes at the
end of the document. A footnote looks like this:

This is a footnote.[^1]

[^1]: the footnote text.

* **Autolinking**. Blackfriday can find URLs that have not been
explicitly marked as links and turn them into links.

@@ -203,7 +222,7 @@ Other renderers
Blackfriday is structured to allow alternative rendering engines. Here
are a few of note:

* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/go/github_flavored_markdown):
* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
provides a GitHub Flavored Markdown renderer with fenced code block
highlighting, clickable header anchor links.

@@ -223,6 +242,8 @@ are a few of note:
point. In particular, it does not do any inline escaping, so input
that happens to look like LaTeX code will be passed through without
modification.

* [Md2Vim](https://github.com/FooSoft/md2vim): transforms markdown files into vimdoc format.

Todo
@@ -243,4 +264,4 @@ License

[1]: http://daringfireball.net/projects/markdown/ "Markdown"
[2]: http://golang.org/ "Go Language"
[3]: http://github.com/tanoku/upskirt "Upskirt"
[3]: https://github.com/vmg/sundown "Sundown"

vendor/github.com/russross/blackfriday/block.go | 193 changed lines (generated, vendored)
@@ -166,6 +166,21 @@ func (p *parser) block(out *bytes.Buffer, data []byte) {
continue
}

// definition lists:
//
// Term 1
// : Definition a
// : Definition b
//
// Term 2
// : Definition c
if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
if p.dliPrefix(data) > 0 {
data = data[p.list(out, data, LIST_TYPE_DEFINITION):]
continue
}
}

// anything else must look like a normal paragraph
// note: this finds underlined headers, too
data = data[p.paragraph(out, data):]
@@ -196,11 +211,8 @@ func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
for level < 6 && data[level] == '#' {
level++
}
i, end := 0, 0
for i = level; data[i] == ' '; i++ {
}
for end = i; data[end] != '\n'; end++ {
}
i := skipChar(data, level, ' ')
end := skipUntilChar(data, i, '\n')
skip := end
id := ""
if p.flags&EXTENSION_HEADER_IDS != 0 {
@@ -221,6 +233,9 @@ func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
}
}
for end > 0 && data[end-1] == '#' {
if isBackslashEscaped(data, end-1) {
break
}
end--
}
for end > 0 && data[end-1] == ' ' {
@@ -242,13 +257,8 @@ func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
func (p *parser) isUnderlinedHeader(data []byte) int {
// test of level 1 header
if data[0] == '=' {
i := 1
for data[i] == '=' {
i++
}
for data[i] == ' ' {
i++
}
i := skipChar(data, 1, '=')
i = skipChar(data, i, ' ')
if data[i] == '\n' {
return 1
} else {
@@ -258,13 +268,8 @@ func (p *parser) isUnderlinedHeader(data []byte) int {

// test of level 2 header
if data[0] == '-' {
i := 1
for data[i] == '-' {
i++
}
for data[i] == ' ' {
i++
}
i := skipChar(data, 1, '-')
i = skipChar(data, i, ' ')
if data[i] == '\n' {
return 2
} else {
@@ -394,23 +399,7 @@ func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {

// HTML comment, lax form
func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
return 0
}

i := 5

// scan for an end-of-comment marker, across lines if necessary
for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
i++
}
i++

// no end-of-comment marker
if i >= len(data) {
return 0
}

i := p.inlineHtmlComment(out, data)
// needs to end with a blank line
if j := p.isEmpty(data[i:]); j > 0 {
size := i + j
@@ -424,7 +413,6 @@ func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int
}
return size
}

return 0
}

@@ -468,7 +456,7 @@ func (p *parser) htmlFindTag(data []byte) (string, bool) {
i++
}
key := string(data[:i])
if blockTags[key] {
if _, ok := blockTags[key]; ok {
return key, true
}
return "", false
@@ -593,10 +581,7 @@ func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (s

if syntax != nil {
syn := 0

for i < len(data) && data[i] == ' ' {
i++
}
i = skipChar(data, i, ' ')

if i >= len(data) {
return
@@ -640,9 +625,7 @@ func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (s
*syntax = &language
}

for i < len(data) && data[i] == ' ' {
i++
}
i = skipChar(data, i, ' ')
if i >= len(data) || data[i] != '\n' {
return
}
@@ -671,11 +654,7 @@ func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int {
}

// copy the current line
end := beg
for end < len(data) && data[end] != '\n' {
end++
}
end++
end := skipUntilChar(data, beg, '\n') + 1

// did we reach the end of the buffer without a closing marker?
if end >= len(data) {
@@ -733,7 +712,7 @@ func (p *parser) table(out *bytes.Buffer, data []byte) int {
return i
}

// check if the specified position is preceeded by an odd number of backslashes
// check if the specified position is preceded by an odd number of backslashes
func isBackslashEscaped(data []byte, i int) bool {
backslashes := 0
for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
@@ -778,9 +757,7 @@ func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns
if data[i] == '|' && !isBackslashEscaped(data, i) {
i++
}
for data[i] == ' ' {
i++
}
i = skipChar(data, i, ' ')

// each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
// and trailing | optional on last column
@@ -914,13 +891,35 @@ func (p *parser) quotePrefix(data []byte) int {
return 0
}

// blockquote ends with at least one blank line
// followed by something without a blockquote prefix
func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
if p.isEmpty(data[beg:]) <= 0 {
return false
}
if end >= len(data) {
return true
}
return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
}

// parse a blockquote fragment
func (p *parser) quote(out *bytes.Buffer, data []byte) int {
var raw bytes.Buffer
beg, end := 0, 0
for beg < len(data) {
end = beg
// Step over whole lines, collecting them. While doing that, check for
// fenced code and if one's found, incorporate it altogether,
// irregardless of any contents inside it
for data[end] != '\n' {
if p.flags&EXTENSION_FENCED_CODE != 0 {
if i := p.fencedCode(out, data[end:], false); i > 0 {
// -1 to compensate for the extra end++ after the loop:
end += i - 1
break
}
}
end++
}
end++
@@ -928,11 +927,7 @@ func (p *parser) quote(out *bytes.Buffer, data []byte) int {
if pre := p.quotePrefix(data[beg:]); pre > 0 {
// skip the prefix
beg += pre
} else if p.isEmpty(data[beg:]) > 0 &&
(end >= len(data) ||
(p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0)) {
// blockquote ends with at least one blank line
// followed by something without a blockquote prefix
} else if p.terminateBlockquote(data, beg, end) {
break
}

@@ -1039,6 +1034,20 @@ func (p *parser) oliPrefix(data []byte) int {
return i + 2
}

// returns definition list item prefix
func (p *parser) dliPrefix(data []byte) int {
i := 0

// need a : followed by a spaces
if data[i] != ':' || data[i+1] != ' ' {
return 0
}
for data[i] == ' ' {
i++
}
return i + 2
}

// parse ordered or unordered list block
func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
i := 0
@@ -1074,7 +1083,19 @@ func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
i = p.oliPrefix(data)
}
if i == 0 {
return 0
i = p.dliPrefix(data)
// reset definition term flag
if i > 0 {
*flags &= ^LIST_TYPE_TERM
}
}
if i == 0 {
// if in defnition list, set term flag and continue
if *flags&LIST_TYPE_DEFINITION != 0 {
*flags |= LIST_TYPE_TERM
} else {
return 0
}
}

// skip leading whitespace on first line
@@ -1084,7 +1105,7 @@ func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {

// find the end of the line
line := i
for data[i-1] != '\n' {
for i > 0 && data[i-1] != '\n' {
i++
}

@@ -1128,7 +1149,8 @@ gatherlines:
switch {
// is this a nested list item?
case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
p.oliPrefix(chunk) > 0:
p.oliPrefix(chunk) > 0 ||
p.dliPrefix(chunk) > 0:

if containsBlankLine {
*flags |= LIST_ITEM_CONTAINS_BLOCK
@@ -1140,7 +1162,7 @@ gatherlines:
break gatherlines
}

// is this the first item in the the nested list?
// is this the first item in the nested list?
if sublist == 0 {
sublist = raw.Len()
}
@@ -1159,7 +1181,21 @@ gatherlines:
// of this item if it is indented 4 spaces
// (regardless of the indentation of the beginning of the item)
case containsBlankLine && indent < 4:
*flags |= LIST_ITEM_END_OF_LIST
if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
// is the next item still a part of this list?
next := i
for data[next] != '\n' {
next++
}
for next < len(data)-1 && data[next] == '\n' {
next++
}
if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
*flags |= LIST_ITEM_END_OF_LIST
}
} else {
*flags |= LIST_ITEM_END_OF_LIST
}
break gatherlines

// a blank line means this should be parsed as a block
@@ -1173,6 +1209,7 @@ gatherlines:
if containsBlankLine {
containsBlankLine = false
raw.WriteByte('\n')

}

// add the line into the working buffer without prefix
@@ -1185,8 +1222,8 @@ gatherlines:

// render the contents of the list item
var cooked bytes.Buffer
if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 {
// intermediate render of block li
if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
// intermediate render of block item, except for definition term
if sublist > 0 {
p.block(&cooked, rawBytes[:sublist])
p.block(&cooked, rawBytes[sublist:])
@@ -1194,7 +1231,7 @@ gatherlines:
p.block(&cooked, rawBytes)
}
} else {
// intermediate render of inline li
// intermediate render of inline item
if sublist > 0 {
p.inline(&cooked, rawBytes[:sublist])
p.block(&cooked, rawBytes[sublist:])
@@ -1258,6 +1295,13 @@ func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {

// did we find a blank line marking the end of the paragraph?
if n := p.isEmpty(current); n > 0 {
// did this blank line followed by a definition list item?
if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
if i < len(data)-1 && data[i+1] == ':' {
return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
}
}

p.renderParagraph(out, data[:i])
return i + n
}
@@ -1316,6 +1360,21 @@ func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
return i
}

// if there's a fenced code block, paragraph is over
if p.flags&EXTENSION_FENCED_CODE != 0 {
if p.fencedCode(out, current, false) > 0 {
p.renderParagraph(out, data[:i])
return i
}
}

// if there's a definition list item, prev line is a definition term
if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
if p.dliPrefix(current) != 0 {
return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
}
}

// if there's a list after this, paragraph is over
if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
if p.uliPrefix(current) != 0 ||

vendor/github.com/russross/blackfriday/html.go | 91 changed lines (generated, vendored)
@@ -31,6 +31,7 @@ const (
HTML_SKIP_LINKS // skip all links
HTML_SAFELINK // only link to trusted protocols
HTML_NOFOLLOW_LINKS // only link with rel="nofollow"
HTML_NOREFERRER_LINKS // only link with rel="noreferrer"
HTML_HREF_TARGET_BLANK // add a blank target
HTML_TOC // generate a table of contents
HTML_OMIT_CONTENTS // skip the main contents (for a standalone table of contents)
@@ -38,7 +39,8 @@ const (
HTML_USE_XHTML // generate XHTML output instead of HTML
HTML_USE_SMARTYPANTS // enable smart punctuation substitutions
HTML_SMARTYPANTS_FRACTIONS // enable smart fractions (with HTML_USE_SMARTYPANTS)
HTML_SMARTYPANTS_LATEX_DASHES // enable LaTeX-style dashes (with HTML_USE_SMARTYPANTS)
HTML_SMARTYPANTS_DASHES // enable smart dashes (with HTML_USE_SMARTYPANTS)
HTML_SMARTYPANTS_LATEX_DASHES // enable LaTeX-style dashes (with HTML_USE_SMARTYPANTS and HTML_SMARTYPANTS_DASHES)
HTML_SMARTYPANTS_ANGLED_QUOTES // enable angled double quotes (with HTML_USE_SMARTYPANTS) for double quotes rendering
HTML_FOOTNOTE_RETURN_LINKS // generate a link at the end of a footnote to return to the source
)
@@ -75,7 +77,7 @@ type HtmlRendererParameters struct {
// Do not create this directly, instead use the HtmlRenderer function.
type Html struct {
flags int // HTML_* options
closeTag string // how to end singleton tags: either " />\n" or ">\n"
closeTag string // how to end singleton tags: either " />" or ">"
title string // document title
css string // optional css file url (used with HTML_COMPLETE_PAGE)

@@ -94,8 +96,8 @@ type Html struct {
}

const (
xhtmlClose = " />\n"
htmlClose = ">\n"
xhtmlClose = " />"
htmlClose = ">"
)

// HtmlRenderer creates and configures an Html object, which
@@ -249,6 +251,7 @@ func (options *Html) HRule(out *bytes.Buffer) {
doubleSpace(out)
out.WriteString("<hr")
out.WriteString(options.closeTag)
out.WriteByte('\n')
}

func (options *Html) BlockCode(out *bytes.Buffer, text []byte, lang string) {
@@ -373,7 +376,9 @@ func (options *Html) List(out *bytes.Buffer, text func() bool, flags int) {
marker := out.Len()
doubleSpace(out)

if flags&LIST_TYPE_ORDERED != 0 {
if flags&LIST_TYPE_DEFINITION != 0 {
out.WriteString("<dl>")
} else if flags&LIST_TYPE_ORDERED != 0 {
out.WriteString("<ol>")
} else {
out.WriteString("<ul>")
@@ -382,7 +387,9 @@ func (options *Html) List(out *bytes.Buffer, text func() bool, flags int) {
out.Truncate(marker)
return
}
if flags&LIST_TYPE_ORDERED != 0 {
if flags&LIST_TYPE_DEFINITION != 0 {
out.WriteString("</dl>\n")
} else if flags&LIST_TYPE_ORDERED != 0 {
out.WriteString("</ol>\n")
} else {
out.WriteString("</ul>\n")
@@ -390,12 +397,25 @@ func (options *Html) List(out *bytes.Buffer, text func() bool, flags int) {
}

func (options *Html) ListItem(out *bytes.Buffer, text []byte, flags int) {
if flags&LIST_ITEM_CONTAINS_BLOCK != 0 || flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
if (flags&LIST_ITEM_CONTAINS_BLOCK != 0 && flags&LIST_TYPE_DEFINITION == 0) ||
flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
doubleSpace(out)
}
out.WriteString("<li>")
if flags&LIST_TYPE_TERM != 0 {
out.WriteString("<dt>")
} else if flags&LIST_TYPE_DEFINITION != 0 {
out.WriteString("<dd>")
} else {
out.WriteString("<li>")
}
out.Write(text)
out.WriteString("</li>\n")
if flags&LIST_TYPE_TERM != 0 {
out.WriteString("</dt>\n")
} else if flags&LIST_TYPE_DEFINITION != 0 {
out.WriteString("</dd>\n")
} else {
out.WriteString("</li>\n")
}
}

func (options *Html) Paragraph(out *bytes.Buffer, text func() bool) {
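
The new LIST_TYPE_TERM / LIST_TYPE_DEFINITION branches above make ListItem emit <dt>/<dd> (and List emit <dl>) instead of <li>. A minimal sketch of what that looks like from the caller's side, assuming the vendored blackfriday v1 API and that MarkdownCommon picks up EXTENSION_DEFINITION_LISTS via commonExtensions (see markdown.go below):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	// The definition-list syntax from the README above.
	input := []byte("Cat\n: Fluffy animal everyone likes\n")

	// MarkdownCommon uses commonExtensions, which now include
	// EXTENSION_DEFINITION_LISTS, so the ListItem branches shown in
	// this diff should render <dl>/<dt>/<dd> rather than <ul>/<li>.
	fmt.Printf("%s", blackfriday.MarkdownCommon(input))
}
```
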
@@ -429,9 +449,17 @@ func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) {

entityEscapeWithSkip(out, link, skipRanges)

var relAttrs []string
if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
out.WriteString("\" rel=\"nofollow")
relAttrs = append(relAttrs, "nofollow")
}
if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
relAttrs = append(relAttrs, "noreferrer")
}
if len(relAttrs) > 0 {
out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
}

// blank target only add to external link
if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
out.WriteString("\" target=\"_blank")
@@ -476,7 +504,7 @@ func (options *Html) Emphasis(out *bytes.Buffer, text []byte) {
}

func (options *Html) maybeWriteAbsolutePrefix(out *bytes.Buffer, link []byte) {
if options.parameters.AbsolutePrefix != "" && isRelativeLink(link) {
if options.parameters.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
out.WriteString(options.parameters.AbsolutePrefix)
if link[0] != '/' {
out.WriteByte('/')
@@ -503,12 +531,12 @@ func (options *Html) Image(out *bytes.Buffer, link []byte, title []byte, alt []b

out.WriteByte('"')
out.WriteString(options.closeTag)
return
}

func (options *Html) LineBreak(out *bytes.Buffer) {
out.WriteString("<br")
out.WriteString(options.closeTag)
out.WriteByte('\n')
}

func (options *Html) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
@@ -535,9 +563,17 @@ func (options *Html) Link(out *bytes.Buffer, link []byte, title []byte, content
out.WriteString("\" title=\"")
attrEscape(out, title)
}
var relAttrs []string
if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
out.WriteString("\" rel=\"nofollow")
relAttrs = append(relAttrs, "nofollow")
}
if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
relAttrs = append(relAttrs, "noreferrer")
}
if len(relAttrs) > 0 {
out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
}

// blank target only add to external link
if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
out.WriteString("\" target=\"_blank")
@@ -850,6 +886,14 @@ func skipSpace(tag []byte, i int) int {
return i
}

func skipChar(data []byte, start int, char byte) int {
i := start
for i < len(data) && data[i] == char {
i++
}
return i
}

func doubleSpace(out *bytes.Buffer) {
if out.Len() > 0 {
out.WriteByte('\n')
@@ -857,23 +901,32 @@ func doubleSpace(out *bytes.Buffer) {
}

func isRelativeLink(link []byte) (yes bool) {
yes = false

// a tag begin with '#'
if link[0] == '#' {
yes = true
return true
}

// link begin with '/' but not '//', the second maybe a protocol relative link
if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
yes = true
return true
}

// only the root '/'
if len(link) == 1 && link[0] == '/' {
yes = true
return true
}
return

// current directory : begin with "./"
if bytes.HasPrefix(link, []byte("./")) {
return true
}

// parent directory : begin with "../"
if bytes.HasPrefix(link, []byte("../")) {
return true
}

return false
}

func (options *Html) ensureUniqueHeaderID(id string) string {

vendor/github.com/russross/blackfriday/inline.go | 111 changed lines (generated, vendored)
@@ -167,12 +167,17 @@ func lineBreak(p *parser, out *bytes.Buffer, data []byte, offset int) int {
out.Truncate(eol)

precededByTwoSpaces := offset >= 2 && data[offset-2] == ' ' && data[offset-1] == ' '
precededByBackslash := offset >= 1 && data[offset-1] == '\\' // see http://spec.commonmark.org/0.18/#example-527
precededByBackslash = precededByBackslash && p.flags&EXTENSION_BACKSLASH_LINE_BREAK != 0

// should there be a hard line break here?
if p.flags&EXTENSION_HARD_LINE_BREAK == 0 && !precededByTwoSpaces {
if p.flags&EXTENSION_HARD_LINE_BREAK == 0 && !precededByTwoSpaces && !precededByBackslash {
return 0
}

if precededByBackslash && eol > 0 {
out.Truncate(eol - 1)
}
p.r.LineBreak(out)
return 1
}
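
The backslash handling above is gated behind EXTENSION_BACKSLASH_LINE_BREAK, which markdown.go below also adds to commonExtensions. A rough usage sketch, assuming the vendored blackfriday v1 API:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	// A trailing backslash before the newline should now produce a hard
	// line break, per the CommonMark example referenced in the comment above.
	input := []byte("first line\\\nsecond line\n")

	renderer := blackfriday.HtmlRenderer(0, "", "")
	out := blackfriday.Markdown(input, renderer, blackfriday.EXTENSION_BACKSLASH_LINE_BREAK)
	fmt.Printf("%s", out)
}
```
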
@@ -186,6 +191,13 @@ const (
linkInlineFootnote
)

func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
if t == linkDeferredFootnote {
return false
}
return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
}

// '[': parse a link or an image or a footnote
func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
// no links allowed inside regular links, footnote, and deferred footnotes
@@ -193,28 +205,35 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
return 0
}

// [text] == regular link
var t linkType
switch {
// special case: ![^text] == deferred footnote (that follows something with
// an exclamation point)
case p.flags&EXTENSION_FOOTNOTES != 0 && len(data)-1 > offset && data[offset+1] == '^':
t = linkDeferredFootnote
// ![alt] == image
case offset > 0 && data[offset-1] == '!':
t = linkImg
// ^[text] == inline footnote
// [^refId] == deferred footnote
var t linkType
if offset > 0 && data[offset-1] == '!' {
t = linkImg
} else if p.flags&EXTENSION_FOOTNOTES != 0 {
case p.flags&EXTENSION_FOOTNOTES != 0:
if offset > 0 && data[offset-1] == '^' {
t = linkInlineFootnote
} else if len(data)-1 > offset && data[offset+1] == '^' {
t = linkDeferredFootnote
}
// [text] == regular link
default:
t = linkNormal
}

data = data[offset:]

var (
i = 1
noteId int
title, link []byte
textHasNl = false
i = 1
noteId int
title, link, altContent []byte
textHasNl = false
)

if t == linkDeferredFootnote {
@@ -348,8 +367,9 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
i++

// reference style link
case i < len(data) && data[i] == '[':
case isReferenceStyleLink(data, i, t):
var id []byte
altContentConsidered := false

// look for the id
i++
@@ -379,22 +399,24 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
id = b.Bytes()
} else {
id = data[1:txtE]
altContentConsidered = true
}
} else {
id = data[linkB:linkE]
}

// find the reference with matching id (ids are case-insensitive)
key := string(bytes.ToLower(id))
lr, ok := p.refs[key]
// find the reference with matching id
lr, ok := p.getRef(string(id))
if !ok {
return 0

}

// keep link and title from reference
link = lr.link
title = lr.title
if altContentConsidered {
altContent = lr.text
}
i++

// shortcut reference style link or reference or inline footnote
@@ -423,7 +445,6 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
}
}

key := string(bytes.ToLower(id))
if t == linkInlineFootnote {
// create a new reference
noteId = len(p.notes) + 1
@@ -453,7 +474,7 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
title = ref.title
} else {
// find the reference with matching id
lr, ok := p.refs[key]
lr, ok := p.getRef(string(id))
if !ok {
return 0
}
@@ -505,7 +526,11 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
// call the relevant rendering function
switch t {
case linkNormal:
p.r.Link(out, uLink, title, content.Bytes())
if len(altContent) > 0 {
p.r.Link(out, uLink, title, altContent)
} else {
p.r.Link(out, uLink, title, content.Bytes())
}

case linkImg:
outSize := out.Len()
@@ -535,12 +560,33 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
return i
}

func (p *parser) inlineHtmlComment(out *bytes.Buffer, data []byte) int {
if len(data) < 5 {
return 0
}
if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
return 0
}
i := 5
// scan for an end-of-comment marker, across lines if necessary
for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
i++
}
// no end-of-comment marker
if i >= len(data) {
return 0
}
return i + 1
}

// '<' when tags or autolinks are allowed
func leftAngle(p *parser, out *bytes.Buffer, data []byte, offset int) int {
data = data[offset:]
altype := LINK_TYPE_NOT_AUTOLINK
end := tagLength(data, &altype)

if size := p.inlineHtmlComment(out, data); size > 0 {
end = size
}
if end > 2 {
if altype != LINK_TYPE_NOT_AUTOLINK {
var uLink bytes.Buffer
@@ -622,10 +668,7 @@ func entity(p *parser, out *bytes.Buffer, data []byte, offset int) int {

func linkEndsWithEntity(data []byte, linkEnd int) bool {
entityRanges := htmlEntity.FindAllIndex(data[:linkEnd], -1)
if entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd {
return true
}
return false
return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
}

func autoLink(p *parser, out *bytes.Buffer, data []byte, offset int) int {
@@ -757,9 +800,20 @@ func isEndOfLink(char byte) bool {
return isspace(char) || char == '<'
}

var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://"), []byte("/")}
var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}

func isSafeLink(link []byte) bool {
for _, path := range validPaths {
if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
if len(link) == len(path) {
return true
} else if isalnum(link[len(path)]) {
return true
}
}
}

for _, prefix := range validUris {
// TODO: handle unicode here
// case-insensitive prefix test
@@ -887,7 +941,7 @@ func isMailtoAutoLink(data []byte) int {

// look for the next emph char, skipping other constructs
func helperFindEmphChar(data []byte, c byte) int {
i := 1
i := 0

for i < len(data) {
for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
@@ -896,15 +950,14 @@ func helperFindEmphChar(data []byte, c byte) int {
if i >= len(data) {
return 0
}
if data[i] == c {
return i
}

// do not count escaped chars
if i != 0 && data[i-1] == '\\' {
i++
continue
}
if data[i] == c {
return i
}

if data[i] == '`' {
// skip a code span

vendor/github.com/russross/blackfriday/latex.go | 2 changed lines (generated, vendored)
@@ -259,7 +259,7 @@ func (options *Latex) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
}

func needsBackslash(c byte) bool {
for _, r := range []byte("_{}%$&\\~") {
for _, r := range []byte("_{}%$&\\~#") {
if c == r {
return true
}

vendor/github.com/russross/blackfriday/markdown.go | 194 changed lines (generated, vendored)
@@ -20,10 +20,12 @@ package blackfriday

import (
"bytes"
"fmt"
"strings"
"unicode/utf8"
)

const VERSION = "1.1"
const VERSION = "1.4"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
@@ -38,15 +40,18 @@ const (
EXTENSION_HARD_LINE_BREAK // translate newlines into line breaks
EXTENSION_TAB_SIZE_EIGHT // expand tabs to eight spaces instead of four
EXTENSION_FOOTNOTES // Pandoc-style footnotes
EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK // No need to insert an empty line to start a (code, quote, order list, unorder list)block
EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
EXTENSION_HEADER_IDS // specify header IDs with {#id}
EXTENSION_TITLEBLOCK // Titleblock ala pandoc
EXTENSION_AUTO_HEADER_IDS // Create the header ID from the text
EXTENSION_BACKSLASH_LINE_BREAK // translate trailing backslashes into line breaks
EXTENSION_DEFINITION_LISTS // render definition lists

commonHtmlFlags = 0 |
HTML_USE_XHTML |
HTML_USE_SMARTYPANTS |
HTML_SMARTYPANTS_FRACTIONS |
HTML_SMARTYPANTS_DASHES |
HTML_SMARTYPANTS_LATEX_DASHES

commonExtensions = 0 |
@@ -56,7 +61,9 @@ const (
EXTENSION_AUTOLINK |
EXTENSION_STRIKETHROUGH |
EXTENSION_SPACE_HEADERS |
EXTENSION_HEADER_IDS
EXTENSION_HEADER_IDS |
EXTENSION_BACKSLASH_LINE_BREAK |
EXTENSION_DEFINITION_LISTS
)

// These are the possible flag values for the link renderer.
@@ -73,6 +80,8 @@ const (
// These are mostly of interest if you are writing a new output format.
const (
LIST_TYPE_ORDERED = 1 << iota
LIST_TYPE_DEFINITION
LIST_TYPE_TERM
LIST_ITEM_CONTAINS_BLOCK
LIST_ITEM_BEGINNING_OF_LIST
LIST_ITEM_END_OF_LIST
@@ -93,45 +102,49 @@ const (
TAB_SIZE_EIGHT = 8
)

// These are the tags that are recognized as HTML block tags.
// blockTags is a set of tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
"p": true,
"dl": true,
"h1": true,
"h2": true,
"h3": true,
"h4": true,
"h5": true,
"h6": true,
"ol": true,
"ul": true,
"del": true,
"div": true,
"ins": true,
"pre": true,
"form": true,
"math": true,
"table": true,
"iframe": true,
"script": true,
"fieldset": true,
"noscript": true,
"blockquote": true,
var blockTags = map[string]struct{}{
"blockquote": struct{}{},
"del": struct{}{},
"div": struct{}{},
"dl": struct{}{},
"fieldset": struct{}{},
"form": struct{}{},
"h1": struct{}{},
"h2": struct{}{},
"h3": struct{}{},
"h4": struct{}{},
"h5": struct{}{},
"h6": struct{}{},
"iframe": struct{}{},
"ins": struct{}{},
"math": struct{}{},
"noscript": struct{}{},
"ol": struct{}{},
"pre": struct{}{},
"p": struct{}{},
"script": struct{}{},
"style": struct{}{},
"table": struct{}{},
"ul": struct{}{},

// HTML5
"video": true,
"aside": true,
"canvas": true,
"figure": true,
"footer": true,
"header": true,
"hgroup": true,
"output": true,
"article": true,
"section": true,
"progress": true,
"figcaption": true,
"address": struct{}{},
"article": struct{}{},
"aside": struct{}{},
"canvas": struct{}{},
"figcaption": struct{}{},
"figure": struct{}{},
"footer": struct{}{},
"header": struct{}{},
"hgroup": struct{}{},
"main": struct{}{},
"nav": struct{}{},
"output": struct{}{},
"progress": struct{}{},
"section": struct{}{},
"video": struct{}{},
}

// Renderer is the rendering interface.
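
The blockTags rewrite above swaps map[string]bool for map[string]struct{}: the empty struct carries no data, so the map acts as a pure membership set, and lookups switch to the comma-ok form (as htmlFindTag in block.go now does). A small illustrative sketch of the idiom, separate from blackfriday itself:

```go
package main

import "fmt"

// blockTagSet mirrors the struct{}-valued set style used above; the
// values occupy no space and only key membership matters.
var blockTagSet = map[string]struct{}{
	"div": {},
	"pre": {},
}

func main() {
	// With struct{} values, indexing no longer yields a usable boolean,
	// so membership is tested with the comma-ok form.
	if _, ok := blockTagSet["div"]; ok {
		fmt.Println("div is a block tag")
	}
	if _, ok := blockTagSet["span"]; !ok {
		fmt.Println("span is not a block tag")
	}
}
```
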
@@ -196,6 +209,7 @@ type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) in
// This is constructed by the Markdown function.
type parser struct {
r Renderer
refOverride ReferenceOverrideFunc
refs map[string]*reference
inlineCallback [256]inlineParser
flags int
@@ -209,12 +223,74 @@ type parser struct {
notes []*reference
}

func (p *parser) getRef(refid string) (ref *reference, found bool) {
if p.refOverride != nil {
r, overridden := p.refOverride(refid)
if overridden {
if r == nil {
return nil, false
}
return &reference{
link: []byte(r.Link),
title: []byte(r.Title),
noteId: 0,
hasBlock: false,
text: []byte(r.Text)}, true
}
}
// refs are case insensitive
ref, found = p.refs[strings.ToLower(refid)]
return ref, found
}

//
//
// Public interface
//
//

// Reference represents the details of a link.
// See the documentation in Options for more details on use-case.
type Reference struct {
// Link is usually the URL the reference points to.
Link string
// Title is the alternate text describing the link in more detail.
Title string
// Text is the optional text to override the ref with if the syntax used was
// [refid][]
Text string
}

// ReferenceOverrideFunc is expected to be called with a reference string and
// return either a valid Reference type that the reference string maps to or
// nil. If overridden is false, the default reference logic will be executed.
// See the documentation in Options for more details on use-case.
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)

// Options represents configurable overrides and callbacks (in addition to the
// extension flag set) for configuring a Markdown parse.
type Options struct {
// Extensions is a flag set of bit-wise ORed extension bits. See the
// EXTENSION_* flags defined in this package.
Extensions int

// ReferenceOverride is an optional function callback that is called every
// time a reference is resolved.
//
// In Markdown, the link reference syntax can be made to resolve a link to
// a reference instead of an inline URL, in one of the following ways:
//
// * [link text][refid]
// * [refid][]
//
// Usually, the refid is defined at the bottom of the Markdown document. If
// this override function is provided, the refid is passed to the override
// function first, before consulting the defined refids at the bottom. If
// the override function indicates an override did not occur, the refids at
// the bottom will be used to fill in the link details.
ReferenceOverride ReferenceOverrideFunc
}

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
@@ -223,9 +299,7 @@ func MarkdownBasic(input []byte) []byte {
renderer := HtmlRenderer(htmlFlags, "", "")

// set up the parser
extensions := 0

return Markdown(input, renderer, extensions)
return MarkdownOptions(input, renderer, Options{Extensions: 0})
}

// Call Markdown with most useful extensions enabled
@@ -250,7 +324,8 @@ func MarkdownBasic(input []byte) []byte {
func MarkdownCommon(input []byte) []byte {
// set up the HTML renderer
renderer := HtmlRenderer(commonHtmlFlags, "", "")
return Markdown(input, renderer, commonExtensions)
return MarkdownOptions(input, renderer, Options{
Extensions: commonExtensions})
}

// Markdown is the main rendering function.
@@ -261,15 +336,25 @@ func MarkdownCommon(input []byte) []byte {
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
return MarkdownOptions(input, renderer, Options{
Extensions: extensions})
}

// MarkdownOptions is just like Markdown but takes additional options through
// the Options struct.
func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte {
// no point in parsing if we can't render
if renderer == nil {
return nil
}

extensions := opts.Extensions

// fill in the render structure
p := new(parser)
p.r = renderer
p.flags = extensions
p.refOverride = opts.ReferenceOverride
p.refs = make(map[string]*reference)
p.maxNesting = 16
p.insideLink = false
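
MarkdownOptions, Options, and ReferenceOverride are the main new entry points in this vendored version. A hedged usage sketch built only from the signatures shown in this diff (the "etcd" refid and its URL are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	input := []byte("See the [etcd][] docs.\n")

	renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_XHTML, "", "")
	out := blackfriday.MarkdownOptions(input, renderer, blackfriday.Options{
		Extensions: 0,
		// ReferenceOverride is consulted before the refids defined at
		// the bottom of the document, as documented above.
		ReferenceOverride: func(refid string) (*blackfriday.Reference, bool) {
			if refid == "etcd" {
				return &blackfriday.Reference{
					Link:  "https://example.com/etcd", // hypothetical URL
					Title: "etcd documentation",
					Text:  "etcd",
				}, true
			}
			return nil, false
		},
	})
	fmt.Printf("%s", out)
}
```
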
@@ -305,7 +390,6 @@ func Markdown(input []byte, renderer Renderer, extensions int) []byte {
// - expand tabs
// - normalize newlines
// - copy everything else
// - add missing newlines before fenced code blocks
func firstPass(p *parser, input []byte) []byte {
var out bytes.Buffer
tabSize := TAB_SIZE_DEFAULT
@@ -313,7 +397,6 @@ func firstPass(p *parser, input []byte) []byte {
tabSize = TAB_SIZE_EIGHT
}
beg, end := 0, 0
lastLineWasBlank := false
lastFencedCodeBlockEnd := 0
for beg < len(input) { // iterate over lines
if end = isReference(p, input[beg:], tabSize); end > 0 {
@@ -325,16 +408,13 @@ func firstPass(p *parser, input []byte) []byte {
}

if p.flags&EXTENSION_FENCED_CODE != 0 {
// when last line was none blank and a fenced code block comes after
// track fenced code block boundaries to suppress tab expansion
// inside them:
if beg >= lastFencedCodeBlockEnd {
if i := p.fencedCode(&out, input[beg:], false); i > 0 {
if !lastLineWasBlank {
out.WriteByte('\n') // need to inject additional linebreak
}
lastFencedCodeBlockEnd = beg + i
}
}
lastLineWasBlank = end == beg
}

// add the line body if present
@@ -376,7 +456,8 @@ func secondPass(p *parser, input []byte) []byte {
if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
p.r.Footnotes(&output, func() bool {
flags := LIST_ITEM_BEGINNING_OF_LIST
for _, ref := range p.notes {
for i := 0; i < len(p.notes); i += 1 {
ref := p.notes[i]
var buf bytes.Buffer
if ref.hasBlock {
flags |= LIST_ITEM_CONTAINS_BLOCK
@@ -436,6 +517,12 @@ type reference struct {
title []byte
noteId int // 0 if not a footnote ref
hasBlock bool
text []byte
}

func (r *reference) String() string {
return fmt.Sprintf("{link: %q, title: %q, text: %q, noteId: %d, hasBlock: %v}",
r.link, r.title, r.text, r.noteId, r.hasBlock)
}

// Check whether or not data starts with a reference link.
@@ -461,7 +548,7 @@ func isReference(p *parser, data []byte, tabSize int) int {
}
i++
if p.flags&EXTENSION_FOOTNOTES != 0 {
if data[i] == '^' {
if i < len(data) && data[i] == '^' {
// we can set it to anything here because the proper noteIds will
// be assigned later during the second pass. It just has to be != 0
noteId = 1
@@ -551,6 +638,9 @@ func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffse
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
i++
}
if i == len(data) {
return
}
linkEnd = i
if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
linkOffset++

vendor/github.com/russross/blackfriday/smartypants.go | 10 changed lines (generated, vendored)
@@ -378,10 +378,12 @@ func smartypants(flags int) *smartypantsRenderer {
}
r['\''] = smartSingleQuote
r['('] = smartParens
if flags&HTML_SMARTYPANTS_LATEX_DASHES == 0 {
r['-'] = smartDash
} else {
r['-'] = smartDashLatex
if flags&HTML_SMARTYPANTS_DASHES != 0 {
if flags&HTML_SMARTYPANTS_LATEX_DASHES == 0 {
r['-'] = smartDash
} else {
r['-'] = smartDashLatex
}
}
r['.'] = smartPeriod
if flags&HTML_SMARTYPANTS_FRACTIONS == 0 {
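
Note the behavior change here: dash substitution is no longer implied by HTML_USE_SMARTYPANTS alone, it now also requires HTML_SMARTYPANTS_DASHES (which commonHtmlFlags in markdown.go sets). A minimal sketch of opting back in with a custom renderer, assuming the v1 flag names from this diff:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	// HTML_SMARTYPANTS_DASHES must now be set explicitly alongside
	// HTML_USE_SMARTYPANTS for "--" and "---" to be converted to dashes.
	flags := blackfriday.HTML_USE_SMARTYPANTS | blackfriday.HTML_SMARTYPANTS_DASHES

	renderer := blackfriday.HtmlRenderer(flags, "", "")
	out := blackfriday.Markdown([]byte("dashes -- everywhere --- now\n"), renderer, 0)
	fmt.Printf("%s", out)
}
```
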