Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/gohugoio/hugo.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>2018-10-20 12:16:18 +0300
committerBjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>2018-10-22 21:46:13 +0300
commit129c27ee6e9fed98dbfebeaa272fd52757b475b2 (patch)
treeba931600714e354f0c7d05ad0a598f591b0258f6 /parser/pageparser
parent44da60d869578423dea529db62ed613588a2a560 (diff)
parser/metadecoders: Consolidate the metadata decoders
See #5324
Diffstat (limited to 'parser/pageparser')
-rw-r--r--parser/pageparser/item.go2
-rw-r--r--parser/pageparser/pagelexer.go34
-rw-r--r--parser/pageparser/pageparser.go10
-rw-r--r--parser/pageparser/pageparser_intro_test.go2
4 files changed, 23 insertions, 25 deletions
diff --git a/parser/pageparser/item.go b/parser/pageparser/item.go
index afc3b5fab..c6f6c3f38 100644
--- a/parser/pageparser/item.go
+++ b/parser/pageparser/item.go
@@ -20,7 +20,7 @@ import (
type Item struct {
Type ItemType
- pos pos
+ Pos Pos
Val []byte
}
diff --git a/parser/pageparser/pagelexer.go b/parser/pageparser/pagelexer.go
index a6a26016b..d3fc11bf2 100644
--- a/parser/pageparser/pagelexer.go
+++ b/parser/pageparser/pagelexer.go
@@ -25,7 +25,7 @@ import (
)
// position (in bytes)
-type pos int
+type Pos int
const eof = -1
@@ -47,9 +47,9 @@ type pageLexer struct {
input []byte
stateStart stateFunc
state stateFunc
- pos pos // input position
- start pos // item start position
- width pos // width of last element
+ pos Pos // input position
+ start Pos // item start position
+ width Pos // width of last element
// Set when we have parsed any summary divider
summaryDividerChecked bool
@@ -73,7 +73,7 @@ func (l *pageLexer) Input() []byte {
// note: the input position here is normally 0 (start), but
// can be set if position of first shortcode is known
// TODO(bep) 2errors byte
-func newPageLexer(input []byte, inputPosition pos, stateStart stateFunc) *pageLexer {
+func newPageLexer(input []byte, inputPosition Pos, stateStart stateFunc) *pageLexer {
lexer := &pageLexer{
input: input,
pos: inputPosition,
@@ -131,7 +131,7 @@ func (l *pageLexer) next() rune {
}
runeValue, runeWidth := utf8.DecodeRune(l.input[l.pos:])
- l.width = pos(runeWidth)
+ l.width = Pos(runeWidth)
l.pos += l.width
return runeValue
}
@@ -210,7 +210,7 @@ func lexMainSection(l *pageLexer) stateFunc {
l3 = l.index(leftDelimSc)
skip := minPositiveIndex(l1, l2, l3)
if skip > 0 {
- l.pos += pos(skip)
+ l.pos += Pos(skip)
}
for {
@@ -234,7 +234,7 @@ func lexMainSection(l *pageLexer) stateFunc {
l.emit(tText)
}
l.summaryDividerChecked = true
- l.pos += pos(len(summaryDivider))
+ l.pos += Pos(len(summaryDivider))
//l.consumeCRLF()
l.emit(TypeLeadSummaryDivider)
} else if l.hasPrefix(summaryDividerOrg) {
@@ -242,7 +242,7 @@ func lexMainSection(l *pageLexer) stateFunc {
l.emit(tText)
}
l.summaryDividerChecked = true
- l.pos += pos(len(summaryDividerOrg))
+ l.pos += Pos(len(summaryDividerOrg))
//l.consumeCRLF()
l.emit(TypeSummaryDividerOrg)
}
@@ -291,12 +291,12 @@ LOOP:
if right == -1 {
return l.errorf("starting HTML comment with no end")
}
- l.pos += pos(right) + pos(len(htmlCOmmentEnd))
+ l.pos += Pos(right) + Pos(len(htmlCOmmentEnd))
l.emit(TypeHTMLComment)
} else {
// Not need to look further. Hugo treats this as plain HTML,
// no front matter, no shortcodes, no nothing.
- l.pos = pos(len(l.input))
+ l.pos = Pos(len(l.input))
l.emit(TypeHTMLDocument)
}
}
@@ -434,7 +434,7 @@ func (l *pageLexer) lexFrontMatterSection(tp ItemType, delimr rune, name string,
}
func lexShortcodeLeftDelim(l *pageLexer) stateFunc {
- l.pos += pos(len(l.currentLeftShortcodeDelim()))
+ l.pos += Pos(len(l.currentLeftShortcodeDelim()))
if l.hasPrefix(leftComment) {
return lexShortcodeComment
}
@@ -451,20 +451,20 @@ func lexShortcodeComment(l *pageLexer) stateFunc {
}
// we emit all as text, except the comment markers
l.emit(tText)
- l.pos += pos(len(leftComment))
+ l.pos += Pos(len(leftComment))
l.ignore()
- l.pos += pos(posRightComment - len(leftComment))
+ l.pos += Pos(posRightComment - len(leftComment))
l.emit(tText)
- l.pos += pos(len(rightComment))
+ l.pos += Pos(len(rightComment))
l.ignore()
- l.pos += pos(len(l.currentRightShortcodeDelim()))
+ l.pos += Pos(len(l.currentRightShortcodeDelim()))
l.emit(tText)
return lexMainSection
}
func lexShortcodeRightDelim(l *pageLexer) stateFunc {
l.closingState = 0
- l.pos += pos(len(l.currentRightShortcodeDelim()))
+ l.pos += Pos(len(l.currentRightShortcodeDelim()))
l.emit(l.currentRightShortcodeDelimItem())
return lexMainSection
}
diff --git a/parser/pageparser/pageparser.go b/parser/pageparser/pageparser.go
index bc6f55dd8..0d32c0e89 100644
--- a/parser/pageparser/pageparser.go
+++ b/parser/pageparser/pageparser.go
@@ -48,7 +48,7 @@ func Parse(r io.Reader) (Result, error) {
}
func parseMainSection(input []byte, from int) Result {
- lexer := newPageLexer(input, pos(from), lexMainSection) // TODO(bep) 2errors
+ lexer := newPageLexer(input, Pos(from), lexMainSection) // TODO(bep) 2errors
lexer.run()
return lexer
}
@@ -57,7 +57,7 @@ func parseMainSection(input []byte, from int) Result {
// if needed.
type Iterator struct {
l *pageLexer
- lastPos pos // position of the last item returned by nextItem
+ lastPos Pos // position of the last item returned by nextItem
}
// consumes and returns the next item
@@ -69,7 +69,7 @@ func (t *Iterator) Next() Item {
var errIndexOutOfBounds = Item{tError, 0, []byte("no more tokens")}
func (t *Iterator) current() Item {
- if t.lastPos >= pos(len(t.l.items)) {
+ if t.lastPos >= Pos(len(t.l.items)) {
return errIndexOutOfBounds
}
return t.l.items[t.lastPos]
@@ -98,7 +98,7 @@ func (t *Iterator) Peek() Item {
// PeekWalk will feed the next items in the iterator to walkFn
// until it returns false.
func (t *Iterator) PeekWalk(walkFn func(item Item) bool) {
- for i := t.lastPos + 1; i < pos(len(t.l.items)); i++ {
+ for i := t.lastPos + 1; i < Pos(len(t.l.items)); i++ {
item := t.l.items[i]
if !walkFn(item) {
break
@@ -120,5 +120,5 @@ func (t *Iterator) Consume(cnt int) {
// LineNumber returns the current line number. Used for logging.
func (t *Iterator) LineNumber() int {
- return bytes.Count(t.l.input[:t.current().pos], lf) + 1
+ return bytes.Count(t.l.input[:t.current().Pos], lf) + 1
}
diff --git a/parser/pageparser/pageparser_intro_test.go b/parser/pageparser/pageparser_intro_test.go
index 850254ac7..1a8c2d237 100644
--- a/parser/pageparser/pageparser_intro_test.go
+++ b/parser/pageparser/pageparser_intro_test.go
@@ -59,9 +59,7 @@ var frontMatterTests = []lexerTest{
{"No front matter", "\nSome text.\n", []Item{tstSomeText, tstEOF}},
{"YAML front matter", "---\nfoo: \"bar\"\n---\n\nSome text.\n", []Item{tstFrontMatterYAML, tstSomeText, tstEOF}},
{"YAML empty front matter", "---\n---\n\nSome text.\n", []Item{nti(TypeFrontMatterYAML, "\n"), tstSomeText, tstEOF}},
-
{"YAML commented out front matter", "<!--\n---\nfoo: \"bar\"\n---\n-->\nSome text.\n", []Item{nti(TypeHTMLComment, "<!--\n---\nfoo: \"bar\"\n---\n-->"), tstSomeText, tstEOF}},
-
// Note that we keep all bytes as they are, but we need to handle CRLF
{"YAML front matter CRLF", "---\r\nfoo: \"bar\"\r\n---\n\nSome text.\n", []Item{tstFrontMatterYAMLCRLF, tstSomeText, tstEOF}},
{"TOML front matter", "+++\nfoo = \"bar\"\n+++\n\nSome text.\n", []Item{tstFrontMatterTOML, tstSomeText, tstEOF}},