Skip to content

Commit

Permalink
Allow non-alphanum characters in tag content
Browse files Browse the repository at this point in the history
The tag content should allow non-alphanum characters. See the spec for
section tag content [1]:

> These tags' content MUST be a non-whitespace character sequence NOT
> containing the current closing delimiter; ...

For the 3 added test cases, #1 and #3 will fail today, and #2 will
trigger a panic. The code change fixes them.

The expected behavior can also be verified on
http://mustache.github.io/#demo, with Mustache:
1: {{#key*}}{{.}}{{/key*}}
2: {{#key}}{{*}}{{/key}}
3: {{#key}}{{*}*}}{{/key}}

and JSON:
{
  "key*": "value*",
  "key": "value",
  "*": "star",
  "*}*": "fish"
}

We can get output as:
1: value*
2: star
3: fish

[1] https://github.com/mustache/spec/blob/b1329a25e6d265ff360267d23f7c6327bbf59f52/specs/sections.yml#L5
  • Loading branch information
Xuewei Zhang committed Apr 9, 2021
1 parent 8bb9cfc commit e31dfb8
Show file tree
Hide file tree
Showing 3 changed files with 61 additions and 18 deletions.
24 changes: 9 additions & 15 deletions lex.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ import (
"bytes"
"fmt"
"strings"
"unicode"
"unicode/utf8"
)

Expand All @@ -30,7 +29,7 @@ type tokenType int
const (
tokenError tokenType = iota // error occurred; value is text of error
tokenEOF
tokenIdentifier // alphanumeric identifier
tokenIdentifier // tag identifier: non-whitespace characters NOT containing closing delimiter
tokenLeftDelim // {{ left action delimiter
tokenRightDelim // }} right action delimiter
tokenText // plain text
Expand Down Expand Up @@ -279,24 +278,24 @@ func stateTag(l *lexer) stateFn {
l.emit(tokenPartial)
case r == '{':
l.emit(tokenRawStart)
case alphanum(r):
default:
l.backup()
return stateIdent
default:
return l.errorf("unrecognized character in action: %#U", r)
}
return stateTag
}

// stateIdent scans an alphanumeric or field.
// stateIdent scans a partial tag or field.
func stateIdent(l *lexer) stateFn {
Loop:
for {
switch r := l.next(); {
case alphanum(r):
// absorb.
switch r := l.peek(); {
case r == eof:
return l.errorf("unclosed tag")
case !whitespace(r) && !strings.HasPrefix(l.input[l.pos:], l.rightDelim):
// absorb
l.next()
default:
l.backup()
l.emit(tokenIdentifier)
break Loop
}
Expand Down Expand Up @@ -365,8 +364,3 @@ func whitespace(r rune) bool {
}
return false
}

// alphanum reports whether r is an alphabetic, digit, or underscore.
func alphanum(r rune) bool {
return r == '_' || r == '.' || unicode.IsLetter(r) || unicode.IsDigit(r)
}
6 changes: 3 additions & 3 deletions lex_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func TestLexer(t *testing.T) {
},
},
{
"\nfoo {{bar}} baz {{=| |=}}\r\n |foo| |={{ }}=| {{bar}}",
"\nfoo {{bar}} baz {{=| |=}}\r\n |foo| |={{! !}}=| {{!bar!}}",
[]token{
{typ: tokenText, val: "\nfoo "},
{typ: tokenLeftDelim, val: "{{"},
Expand All @@ -42,9 +42,9 @@ func TestLexer(t *testing.T) {
{typ: tokenText, val: " "},
{typ: tokenSetDelim},
{typ: tokenText, val: " "},
{typ: tokenLeftDelim, val: "{{"},
{typ: tokenLeftDelim, val: "{{!"},
{typ: tokenIdentifier, val: "bar"},
{typ: tokenRightDelim, val: "}}"},
{typ: tokenRightDelim, val: "!}}"},
{typ: tokenEOF},
},
},
Expand Down
49 changes: 49 additions & 0 deletions parse_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ package mustache

import (
"reflect"
"strings"
"testing"
)

Expand Down Expand Up @@ -56,6 +57,36 @@ func TestParser(t *testing.T) {
}},
},
},
{
"{{#*}}({{.}}){{/*}}",
[]node{
&sectionNode{"*", false, []node{
textNode("("),
&varNode{".", true},
textNode(")"),
}},
},
},
{
"{{#list}}({{*}}){{/list}}",
[]node{
&sectionNode{"list", false, []node{
textNode("("),
&varNode{"*", true},
textNode(")"),
}},
},
},
{
"{{#list}}({{a}a}}){{/list}}",
[]node{
&sectionNode{"list", false, []node{
textNode("("),
&varNode{"a}a", true},
textNode(")"),
}},
},
},
} {
parser := newParser(newLexer(test.template, "{{", "}}"))
elems, err := parser.parse()
Expand All @@ -69,3 +100,21 @@ func TestParser(t *testing.T) {
}
}
}

func TestParserNegative(t *testing.T) {
for _, test := range []struct {
template string
expErr string
}{
{
"{{foo}",
`1:6 syntax error: unreachable code t_error:"unclosed tag"`,
},
} {
parser := newParser(newLexer(test.template, "{{", "}}"))
_, err := parser.parse()
if err == nil || !strings.Contains(err.Error(), test.expErr) {
t.Errorf("expect error: %q, got %q", test.expErr, err)
}
}
}

0 comments on commit e31dfb8

Please sign in to comment.