merge lexer package into gherkin

This commit is contained in:
gedi 2015-06-11 10:36:48 +03:00
parent 9bacb9cc6f
commit 1cc5fde508
11 changed files with 194 additions and 213 deletions
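This commit folds the former github.com/DATA-DOG/godog/gherkin/lexer subpackage into package gherkin itself, unexporting the lexer API in the process. A minimal before/after sketch of a call site, reconstructed from the hunks below; the feature source string is illustrative, and the "after" form only compiles inside package gherkin, since the merged API is unexported:

// before: a separate, exported package
// import "github.com/DATA-DOG/godog/gherkin/lexer"
lx := lexer.New(strings.NewReader("Feature: example")) // *lexer.Lexer
tok := lx.Next()                                       // *lexer.Token
isFeature := tok.Type == lexer.FEATURE

// after: package-internal, unexported API
lx := newLexer(strings.NewReader("Feature: example")) // *lexer
tok := lx.read()                                      // *Token
isFeature := tok.Type == FEATURE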

View file

@@ -1,10 +1,8 @@
package gherkin
import "github.com/DATA-DOG/godog/gherkin/lexer"
type item struct {
next, prev *item
value *lexer.Token
value *Token
}
// AST is a linked list to store gherkin Tokens
@@ -18,7 +16,7 @@ func newAST() *AST {
return &AST{}
}
func (l *AST) addTail(t *lexer.Token) *item {
func (l *AST) addTail(t *Token) *item {
it := &item{next: nil, prev: l.tail, value: t}
if l.head == nil {
l.head = it
@@ -29,7 +27,7 @@ func (l *AST) addTail(t *lexer.Token) *item {
return l.tail
}
func (l *AST) addBefore(t *lexer.Token, i *item) *item {
func (l *AST) addBefore(t *Token, i *item) *item {
it := &item{next: i, prev: i.prev, value: t}
i.prev = it
if it.prev == nil {
@@ -38,7 +36,7 @@ func (l *AST) addBefore(t *lexer.Token, i *item) *item {
return it
}
func (l *AST) addAfter(t *lexer.Token, i *item) *item {
func (l *AST) addAfter(t *Token, i *item) *item {
it := &item{next: i.next, prev: i, value: t}
i.next = it
if it.next == nil {
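For orientation: the AST kept by the parser is a plain doubly linked list of tokens. A minimal usage sketch, valid only inside package gherkin and assuming just the fields visible in this file plus the Token struct defined later in this commit:

ast := newAST()
ast.addTail(&Token{Type: FEATURE, Value: "gherkin lexer"})
ast.addTail(&Token{Type: EOF})
// walk the recorded tokens in insertion order
for it := ast.head; it != nil; it = it.next {
	fmt.Println(it.value.Type)
}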

View file

@@ -2,11 +2,9 @@ package gherkin
import (
"testing"
"github.com/DATA-DOG/godog/gherkin/lexer"
)
func (a *AST) assertMatchesTypes(expected []lexer.TokenType, t *testing.T) {
func (a *AST) assertMatchesTypes(expected []TokenType, t *testing.T) {
key := -1
for item := a.head; item != nil; item = item.next {
key += 1

View file

@@ -3,8 +3,6 @@ package gherkin
import (
"strings"
"testing"
"github.com/DATA-DOG/godog/gherkin/lexer"
)
var testFeatureSamples = map[string]string{
@@ -34,7 +32,7 @@ func (f *Feature) assertHasNumScenarios(n int, t *testing.T) {
func Test_parse_normal_feature(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testFeatureSamples["feature"])),
lx: newLexer(strings.NewReader(testFeatureSamples["feature"])),
path: "some.feature",
ast: newAST(),
}
@@ -49,17 +47,17 @@ func Test_parse_normal_feature(t *testing.T) {
t.Fatalf("expected a feature description to be available")
}
ft.AST.assertMatchesTypes([]lexer.TokenType{
lexer.FEATURE,
lexer.TEXT,
lexer.TEXT,
lexer.TEXT,
ft.AST.assertMatchesTypes([]TokenType{
FEATURE,
TEXT,
TEXT,
TEXT,
}, t)
}
func Test_parse_feature_without_description(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testFeatureSamples["only_title"])),
lx: newLexer(strings.NewReader(testFeatureSamples["only_title"])),
path: "some.feature",
ast: newAST(),
}
@@ -74,14 +72,14 @@ func Test_parse_feature_without_description(t *testing.T) {
t.Fatalf("feature description was not expected")
}
ft.AST.assertMatchesTypes([]lexer.TokenType{
lexer.FEATURE,
ft.AST.assertMatchesTypes([]TokenType{
FEATURE,
}, t)
}
func Test_parse_empty_feature_file(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testFeatureSamples["empty"])),
lx: newLexer(strings.NewReader(testFeatureSamples["empty"])),
path: "some.feature",
ast: newAST(),
}
@@ -93,7 +91,7 @@
func Test_parse_invalid_feature_with_random_text(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testFeatureSamples["invalid"])),
lx: newLexer(strings.NewReader(testFeatureSamples["invalid"])),
path: "some.feature",
ast: newAST(),
}
@@ -101,14 +99,14 @@ func Test_parse_invalid_feature_with_random_text(t *testing.T) {
if err == nil {
t.Fatalf("expected an error but got none")
}
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.TEXT,
p.ast.assertMatchesTypes([]TokenType{
TEXT,
}, t)
}
func Test_parse_feature_with_newlines(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testFeatureSamples["starts_with_newlines"])),
lx: newLexer(strings.NewReader(testFeatureSamples["starts_with_newlines"])),
path: "some.feature",
ast: newAST(),
}
@@ -123,9 +121,9 @@ func Test_parse_feature_with_newlines(t *testing.T) {
t.Fatalf("feature description was not expected")
}
ft.AST.assertMatchesTypes([]lexer.TokenType{
lexer.NEW_LINE,
lexer.NEW_LINE,
lexer.FEATURE,
ft.AST.assertMatchesTypes([]TokenType{
NEW_LINE,
NEW_LINE,
FEATURE,
}, t)
}

View file

@@ -64,8 +64,6 @@ import (
"os"
"strings"
"unicode"
"github.com/DATA-DOG/godog/gherkin/lexer"
)
// Tag is gherkin feature or scenario tag.
@@ -142,12 +140,12 @@ type Table struct {
rows [][]string
}
var allSteps = []lexer.TokenType{
lexer.GIVEN,
lexer.WHEN,
lexer.THEN,
lexer.AND,
lexer.BUT,
var allSteps = []TokenType{
GIVEN,
WHEN,
THEN,
AND,
BUT,
}
// ErrEmpty is returned in case if feature file
@@ -155,10 +153,10 @@ var allSteps = []lexer.TokenType{
var ErrEmpty = errors.New("the feature file is empty")
type parser struct {
lx *lexer.Lexer
lx *lexer
path string
ast *AST
peeked *lexer.Token
peeked *Token
}
// Parse the feature file on the given path into
@@ -172,15 +170,15 @@ func Parse(path string) (*Feature, error) {
defer file.Close()
return (&parser{
lx: lexer.New(file),
lx: newLexer(file),
path: path,
ast: newAST(),
}).parseFeature()
}
// reads tokens into AST and skips comments or new lines
func (p *parser) next() *lexer.Token {
if p.ast.tail != nil && p.ast.tail.value.Type == lexer.EOF {
func (p *parser) next() *Token {
if p.ast.tail != nil && p.ast.tail.value.Type == EOF {
return p.ast.tail.value // has reached EOF, do not record it more than once
}
tok := p.peek()
@@ -190,12 +188,12 @@ func (p *parser) next() *lexer.Token {
}
// peeks into next token, skips comments or new lines
func (p *parser) peek() *lexer.Token {
func (p *parser) peek() *Token {
if p.peeked != nil {
return p.peeked
}
for p.peeked = p.lx.Next(); p.peeked.OfType(lexer.COMMENT, lexer.NEW_LINE); p.peeked = p.lx.Next() {
for p.peeked = p.lx.read(); p.peeked.OfType(COMMENT, NEW_LINE); p.peeked = p.lx.read() {
p.ast.addTail(p.peeked) // record comments and newlines
}
@@ -210,28 +208,28 @@ func (p *parser) parseFeature() (ft *Feature, err error) {
ft = &Feature{Path: p.path, AST: p.ast}
switch p.peek().Type {
case lexer.EOF:
case EOF:
return ft, ErrEmpty
case lexer.TAGS:
case TAGS:
ft.Tags = p.parseTags()
}
tok := p.next()
if tok.Type != lexer.FEATURE {
if tok.Type != FEATURE {
return ft, p.err("expected a file to begin with a feature definition, but got '"+tok.Type.String()+"' instead", tok.Line)
}
ft.Title = tok.Value
ft.Comment = tok.Comment
var desc []string
for ; p.peek().Type == lexer.TEXT; tok = p.next() {
for ; p.peek().Type == TEXT; tok = p.next() {
desc = append(desc, tok.Value)
}
ft.Description = strings.Join(desc, "\n")
for tok = p.peek(); tok.Type != lexer.EOF; tok = p.peek() {
for tok = p.peek(); tok.Type != EOF; tok = p.peek() {
// there may be a background
if tok.Type == lexer.BACKGROUND {
if tok.Type == BACKGROUND {
if ft.Background != nil {
return ft, p.err("there can only be a single background section, but found another", tok.Line)
}
@@ -247,7 +245,7 @@ func (p *parser) parseFeature() (ft *Feature, err error) {
// there may be tags before scenario
sc := &Scenario{}
sc.Tags = append(sc.Tags, ft.Tags...)
if tok.Type == lexer.TAGS {
if tok.Type == TAGS {
for _, t := range p.parseTags() {
if !sc.Tags.Has(t) {
sc.Tags = append(sc.Tags, t)
@@ -257,7 +255,7 @@ func (p *parser) parseFeature() (ft *Feature, err error) {
}
// there must be a scenario otherwise
if tok.Type != lexer.SCENARIO {
if tok.Type != SCENARIO {
return ft, p.err("expected a scenario, but got '"+tok.Type.String()+"' instead", tok.Line)
}
@@ -277,13 +275,13 @@ func (p *parser) parseSteps() (steps []*Step, err error) {
for tok := p.peek(); tok.OfType(allSteps...); tok = p.peek() {
step := &Step{Text: tok.Value, Comment: tok.Comment}
switch tok.Type {
case lexer.GIVEN:
case GIVEN:
step.Type = Given
case lexer.WHEN:
case WHEN:
step.Type = When
case lexer.THEN:
case THEN:
step.Type = Then
case lexer.AND, lexer.BUT:
case AND, BUT:
if len(steps) > 0 {
step.Type = steps[len(steps)-1].Type
} else {
@@ -295,11 +293,11 @@ func (p *parser) parseSteps() (steps []*Step, err error) {
if step.Text[len(step.Text)-1] == ':' {
tok = p.peek()
switch tok.Type {
case lexer.PYSTRING:
case PYSTRING:
if err := p.parsePystring(step); err != nil {
return steps, err
}
case lexer.TABLE_ROW:
case TABLE_ROW:
if err := p.parseTable(step); err != nil {
return steps, err
}
@@ -315,13 +313,13 @@ func (p *parser) parseSteps() (steps []*Step, err error) {
}
func (p *parser) parsePystring(s *Step) error {
var tok *lexer.Token
var tok *Token
started := p.next() // skip the start of pystring
var lines []string
for tok = p.next(); !tok.OfType(lexer.EOF, lexer.PYSTRING); tok = p.next() {
for tok = p.next(); !tok.OfType(EOF, PYSTRING); tok = p.next() {
lines = append(lines, tok.Text)
}
if tok.Type == lexer.EOF {
if tok.Type == EOF {
return fmt.Errorf("pystring which was opened on %s:%d was not closed", p.path, started.Line)
}
s.PyString = &PyString{Body: strings.Join(lines, "\n")}
@@ -330,7 +328,7 @@ func (p *parser) parsePystring(s *Step) error {
func (p *parser) parseTable(s *Step) error {
s.Table = &Table{}
for row := p.peek(); row.Type == lexer.TABLE_ROW; row = p.peek() {
for row := p.peek(); row.Type == TABLE_ROW; row = p.peek() {
var cols []string
for _, r := range strings.Split(strings.Trim(row.Value, "|"), "|") {
cols = append(cols, strings.TrimFunc(r, unicode.IsSpace))
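The exported entry point is untouched by this commit; only the parser internals switch to the merged lexer. A hedged usage sketch from a caller's perspective, where "some.feature" is a placeholder path:

// import "github.com/DATA-DOG/godog/gherkin"
ft, err := gherkin.Parse("some.feature")
if err == gherkin.ErrEmpty {
	return // the feature file contained no tokens at all
}
if err != nil {
	log.Fatal(err) // e.g. an unclosed pystring, or a file not starting with Feature:
}
fmt.Println(ft.Title, ft.Description)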

View file

@@ -1,24 +1,36 @@
package lexer
package gherkin
import (
"bufio"
"io"
"regexp"
"strings"
"unicode"
)
type Lexer struct {
var matchers = map[string]*regexp.Regexp{
"feature": regexp.MustCompile("^(\\s*)Feature:\\s*([^#]*)(#.*)?"),
"scenario": regexp.MustCompile("^(\\s*)Scenario:\\s*([^#]*)(#.*)?"),
"background": regexp.MustCompile("^(\\s*)Background:(\\s*#.*)?"),
"step": regexp.MustCompile("^(\\s*)(Given|When|Then|And|But)\\s+([^#]*)(#.*)?"),
"comment": regexp.MustCompile("^(\\s*)#(.+)"),
"pystring": regexp.MustCompile("^(\\s*)\\\"\\\"\\\""),
"tags": regexp.MustCompile("^(\\s*)@([^#]*)(#.*)?"),
"table_row": regexp.MustCompile("^(\\s*)\\|([^#]*)(#.*)?"),
}
type lexer struct {
reader *bufio.Reader
lines int
}
func New(r io.Reader) *Lexer {
return &Lexer{
func newLexer(r io.Reader) *lexer {
return &lexer{
reader: bufio.NewReader(r),
}
}
func (l *Lexer) Next() *Token {
func (l *lexer) read() *Token {
line, err := l.reader.ReadString(byte('\n'))
if err != nil && len(line) == 0 {
return &Token{
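The matchers table is what read() presumably consults line by line; the matching logic itself falls outside this diff's context. An illustration of the "step" matcher on a made-up input:

m := matchers["step"].FindStringSubmatch("  Given I am logged in # as admin")
// m[1] == "  "              leading indentation
// m[2] == "Given"           step keyword
// m[3] == "I am logged in " value ([^#]* keeps the trailing space)
// m[4] == "# as admin"      optional trailing comment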

View file

@@ -1,14 +0,0 @@
package lexer
import "regexp"
var matchers = map[string]*regexp.Regexp{
"feature": regexp.MustCompile("^(\\s*)Feature:\\s*([^#]*)(#.*)?"),
"scenario": regexp.MustCompile("^(\\s*)Scenario:\\s*([^#]*)(#.*)?"),
"background": regexp.MustCompile("^(\\s*)Background:(\\s*#.*)?"),
"step": regexp.MustCompile("^(\\s*)(Given|When|Then|And|But)\\s+([^#]*)(#.*)?"),
"comment": regexp.MustCompile("^(\\s*)#(.+)"),
"pystring": regexp.MustCompile("^(\\s*)\\\"\\\"\\\""),
"tags": regexp.MustCompile("^(\\s*)@([^#]*)(#.*)?"),
"table_row": regexp.MustCompile("^(\\s*)\\|([^#]*)(#.*)?"),
}

View file

@@ -1,18 +0,0 @@
package lexer
type Token struct {
Type TokenType // type of token
Line, Indent int // line and indentation number
Value string // interpreted value
Text string // same text as read
Comment string // a comment
}
func (t *Token) OfType(all ...TokenType) bool {
for _, typ := range all {
if typ == t.Type {
return true
}
}
return false
}

View file

@@ -1,11 +1,11 @@
package lexer
package gherkin
import (
"strings"
"testing"
)
var samples = map[string]string{
var lexerSamples = map[string]string{
"feature": `Feature: gherkin lexer
in order to run features
as gherkin lexer
@@ -29,13 +29,9 @@ var samples = map[string]string{
| John | Doe | 79 |`,
}
func indent(n int, s string) string {
return strings.Repeat(" ", n) + s
}
func Test_feature_read(t *testing.T) {
l := New(strings.NewReader(samples["feature"]))
tok := l.Next()
l := newLexer(strings.NewReader(lexerSamples["feature"]))
tok := l.read()
if tok.Type != FEATURE {
t.Fatalf("Expected a 'feature' type, but got: '%s'", tok.Type)
}
@@ -50,7 +46,7 @@ func Test_feature_read(t *testing.T) {
t.Fatalf("Expected a token identation to be '0', but got: '%d'", tok.Indent)
}
tok = l.Next()
tok = l.read()
if tok.Type != TEXT {
t.Fatalf("Expected a 'text' type, but got: '%s'", tok.Type)
}
@@ -65,7 +61,7 @@ func Test_feature_read(t *testing.T) {
t.Fatalf("Expected a token identation to be '2', but got: '%d'", tok.Indent)
}
tok = l.Next()
tok = l.read()
if tok.Type != TEXT {
t.Fatalf("Expected a 'text' type, but got: '%s'", tok.Type)
}
@@ -80,7 +76,7 @@ func Test_feature_read(t *testing.T) {
t.Fatalf("Expected a token identation to be '2', but got: '%d'", tok.Indent)
}
tok = l.Next()
tok = l.read()
if tok.Type != TEXT {
t.Fatalf("Expected a 'text' type, but got: '%s'", tok.Type)
}
@@ -95,7 +91,7 @@ func Test_feature_read(t *testing.T) {
t.Fatalf("Expected a token identation to be '2', but got: '%d'", tok.Indent)
}
tok = l.Next()
tok = l.read()
if tok.Type != EOF {
t.Fatalf("Expected an 'eof' type, but got: '%s'", tok.Type)
}
@@ -103,21 +99,21 @@ func Test_feature_read(t *testing.T) {
func Test_minimal_feature(t *testing.T) {
file := strings.Join([]string{
samples["feature"] + "\n",
lexerSamples["feature"] + "\n",
indent(2, samples["background"]),
indent(4, samples["step_given"]) + "\n",
indent(2, lexerSamples["background"]),
indent(4, lexerSamples["step_given"]) + "\n",
indent(2, samples["comment"]),
indent(2, samples["scenario"]),
indent(4, samples["step_given"]),
indent(4, samples["step_when"]),
indent(4, samples["step_then"]),
indent(2, lexerSamples["comment"]),
indent(2, lexerSamples["scenario"]),
indent(4, lexerSamples["step_given"]),
indent(4, lexerSamples["step_when"]),
indent(4, lexerSamples["step_then"]),
}, "\n")
l := New(strings.NewReader(file))
l := newLexer(strings.NewReader(file))
var tokens []TokenType
for tok := l.Next(); tok.Type != EOF; tok = l.Next() {
for tok := l.read(); tok.Type != EOF; tok = l.read() {
tokens = append(tokens, tok.Type)
}
expected := []TokenType{
@@ -146,16 +142,16 @@ func Test_minimal_feature(t *testing.T) {
func Test_table_row_reading(t *testing.T) {
file := strings.Join([]string{
indent(2, samples["background"]),
indent(4, samples["step_given_table"]),
indent(4, samples["step_given"]),
indent(2, lexerSamples["background"]),
indent(4, lexerSamples["step_given_table"]),
indent(4, lexerSamples["step_given"]),
}, "\n")
l := New(strings.NewReader(file))
l := newLexer(strings.NewReader(file))
var types []TokenType
var values []string
var indents []int
for tok := l.Next(); tok.Type != EOF; tok = l.Next() {
for tok := l.read(); tok.Type != EOF; tok = l.read() {
types = append(types, tok.Type)
values = append(values, tok.Value)
indents = append(indents, tok.Indent)

View file

@@ -3,8 +3,6 @@ package gherkin
import (
"strings"
"testing"
"github.com/DATA-DOG/godog/gherkin/lexer"
)
func (s *Scenario) assertHasTag(tag string, t *testing.T) {
@@ -41,7 +39,7 @@ func Test_parse_feature_file(t *testing.T) {
}, "\n")
p := &parser{
lx: lexer.New(strings.NewReader(content)),
lx: newLexer(strings.NewReader(content)),
path: "usual.feature",
ast: newAST(),
}
@@ -51,36 +49,36 @@ func Test_parse_feature_file(t *testing.T) {
}
ft.assertTitle("gherkin parser", t)
ft.AST.assertMatchesTypes([]lexer.TokenType{
lexer.TAGS,
lexer.FEATURE,
lexer.TEXT,
lexer.TEXT,
lexer.TEXT,
lexer.NEW_LINE,
ft.AST.assertMatchesTypes([]TokenType{
TAGS,
FEATURE,
TEXT,
TEXT,
TEXT,
NEW_LINE,
lexer.BACKGROUND,
lexer.GIVEN,
lexer.TABLE_ROW,
lexer.NEW_LINE,
BACKGROUND,
GIVEN,
TABLE_ROW,
NEW_LINE,
lexer.SCENARIO,
lexer.GIVEN,
lexer.AND,
lexer.WHEN,
lexer.THEN,
lexer.NEW_LINE,
SCENARIO,
GIVEN,
AND,
WHEN,
THEN,
NEW_LINE,
lexer.TAGS,
lexer.SCENARIO,
lexer.GIVEN,
lexer.AND,
lexer.WHEN,
lexer.THEN,
lexer.NEW_LINE,
TAGS,
SCENARIO,
GIVEN,
AND,
WHEN,
THEN,
NEW_LINE,
lexer.TAGS,
lexer.SCENARIO,
TAGS,
SCENARIO,
}, t)
ft.assertHasNumScenarios(3, t)

View file

@@ -3,8 +3,6 @@ package gherkin
import (
"strings"
"testing"
"github.com/DATA-DOG/godog/gherkin/lexer"
)
var testStepSamples = map[string]string{
@@ -92,7 +90,7 @@ func (s *Step) assertTableRow(t *testing.T, num int, cols ...string) {
func Test_parse_basic_given_step(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["given"])),
lx: newLexer(strings.NewReader(testStepSamples["given"])),
path: "some.feature",
ast: newAST(),
}
@@ -108,15 +106,15 @@ func Test_parse_basic_given_step(t *testing.T) {
steps[0].assertText("I'm a step", t)
p.next() // step over to eof
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.GIVEN,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
GIVEN,
EOF,
}, t)
}
func Test_parse_step_with_comment(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["step_comment"])),
lx: newLexer(strings.NewReader(testStepSamples["step_comment"])),
path: "some.feature",
ast: newAST(),
}
@@ -133,15 +131,15 @@ func Test_parse_step_with_comment(t *testing.T) {
steps[0].assertComment("sets admin permissions", t)
p.next() // step over to eof
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.GIVEN,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
GIVEN,
EOF,
}, t)
}
func Test_parse_hash_table_given_step(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["given_table_hash"])),
lx: newLexer(strings.NewReader(testStepSamples["given_table_hash"])),
path: "some.feature",
ast: newAST(),
}
@@ -158,16 +156,16 @@ func Test_parse_hash_table_given_step(t *testing.T) {
steps[0].assertTableRow(t, 0, "name", "John Doe")
p.next() // step over to eof
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.GIVEN,
lexer.TABLE_ROW,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
GIVEN,
TABLE_ROW,
EOF,
}, t)
}
func Test_parse_table_given_step(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["given_table"])),
lx: newLexer(strings.NewReader(testStepSamples["given_table"])),
path: "some.feature",
ast: newAST(),
}
@@ -186,18 +184,18 @@ func Test_parse_table_given_step(t *testing.T) {
steps[0].assertTableRow(t, 2, "Jane", "Doe")
p.next() // step over to eof
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.GIVEN,
lexer.TABLE_ROW,
lexer.TABLE_ROW,
lexer.TABLE_ROW,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
GIVEN,
TABLE_ROW,
TABLE_ROW,
TABLE_ROW,
EOF,
}, t)
}
func Test_parse_pystring_step(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["then_pystring"])),
lx: newLexer(strings.NewReader(testStepSamples["then_pystring"])),
path: "some.feature",
ast: newAST(),
}
@@ -217,19 +215,19 @@ func Test_parse_pystring_step(t *testing.T) {
}, "\n"), t)
p.next() // step over to eof
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.THEN,
lexer.PYSTRING,
lexer.TEXT,
lexer.AND, // we do not care what we parse inside PYSTRING even if it's a whole behat feature text
lexer.PYSTRING,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
THEN,
PYSTRING,
TEXT,
AND, // we do not care what we parse inside PYSTRING even if it's a whole behat feature text
PYSTRING,
EOF,
}, t)
}
func Test_parse_empty_pystring_step(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["when_pystring_empty"])),
lx: newLexer(strings.NewReader(testStepSamples["when_pystring_empty"])),
path: "some.feature",
ast: newAST(),
}
@@ -246,17 +244,17 @@ func Test_parse_empty_pystring_step(t *testing.T) {
steps[0].assertPyString("", t)
p.next() // step over to eof
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.WHEN,
lexer.PYSTRING,
lexer.PYSTRING,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
WHEN,
PYSTRING,
PYSTRING,
EOF,
}, t)
}
func Test_parse_unclosed_pystring_step(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["when_pystring_unclosed"])),
lx: newLexer(strings.NewReader(testStepSamples["when_pystring_unclosed"])),
path: "some.feature",
ast: newAST(),
}
@@ -264,18 +262,18 @@ func Test_parse_unclosed_pystring_step(t *testing.T) {
if err == nil {
t.Fatalf("expected an error, but got none")
}
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.WHEN,
lexer.PYSTRING,
lexer.TEXT,
lexer.TEXT,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
WHEN,
PYSTRING,
TEXT,
TEXT,
EOF,
}, t)
}
func Test_parse_step_group(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["step_group"])),
lx: newLexer(strings.NewReader(testStepSamples["step_group"])),
path: "some.feature",
ast: newAST(),
}
@@ -297,18 +295,18 @@ func Test_parse_step_group(t *testing.T) {
steps[3].assertText("something should happen", t)
p.next() // step over to eof
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.GIVEN,
lexer.AND,
lexer.WHEN,
lexer.THEN,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
GIVEN,
AND,
WHEN,
THEN,
EOF,
}, t)
}
func Test_parse_another_step_group(t *testing.T) {
p := &parser{
lx: lexer.New(strings.NewReader(testStepSamples["step_group_another"])),
lx: newLexer(strings.NewReader(testStepSamples["step_group_another"])),
path: "some.feature",
ast: newAST(),
}
@@ -330,11 +328,11 @@ func Test_parse_another_step_group(t *testing.T) {
steps[3].assertText("I expect the result", t)
p.next() // step over to eof
p.ast.assertMatchesTypes([]lexer.TokenType{
lexer.GIVEN,
lexer.AND,
lexer.WHEN,
lexer.THEN,
lexer.EOF,
p.ast.assertMatchesTypes([]TokenType{
GIVEN,
AND,
WHEN,
THEN,
EOF,
}, t)
}

View file

@@ -1,4 +1,4 @@
package lexer
package gherkin
type TokenType int
@@ -64,3 +64,20 @@ func (t TokenType) String() string {
}
return "illegal"
}
type Token struct {
Type TokenType // type of token
Line, Indent int // line and indentation number
Value string // interpreted value
Text string // same text as read
Comment string // a comment
}
func (t *Token) OfType(all ...TokenType) bool {
for _, typ := range all {
if typ == t.Type {
return true
}
}
return false
}
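OfType is the helper the parser leans on throughout this commit, e.g. when peek() skips comments and blank lines. A quick sketch, again only meaningful inside package gherkin:

tok := &Token{Type: NEW_LINE}
_ = tok.OfType(COMMENT, NEW_LINE) // true: peek() records the token and keeps reading
_ = tok.OfType(GIVEN, WHEN, THEN) // false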