merge lexer package into gherkin

gedi 2015-06-11 10:36:48 +03:00
parent 9bacb9cc6f
commit 1cc5fde508
11 changed files with 194 additions and 213 deletions

View File

@@ -1,10 +1,8 @@
 package gherkin
-import "github.com/DATA-DOG/godog/gherkin/lexer"
 type item struct {
 	next, prev *item
-	value      *lexer.Token
+	value      *Token
 }
 // AST is a linked list to store gherkin Tokens
@@ -18,7 +16,7 @@ func newAST() *AST {
 	return &AST{}
 }
-func (l *AST) addTail(t *lexer.Token) *item {
+func (l *AST) addTail(t *Token) *item {
 	it := &item{next: nil, prev: l.tail, value: t}
 	if l.head == nil {
 		l.head = it
@@ -29,7 +27,7 @@ func (l *AST) addTail(t *lexer.Token) *item {
 	return l.tail
 }
-func (l *AST) addBefore(t *lexer.Token, i *item) *item {
+func (l *AST) addBefore(t *Token, i *item) *item {
 	it := &item{next: i, prev: i.prev, value: t}
 	i.prev = it
 	if it.prev == nil {
@@ -38,7 +36,7 @@ func (l *AST) addBefore(t *lexer.Token, i *item) *item {
 	return it
 }
-func (l *AST) addAfter(t *lexer.Token, i *item) *item {
+func (l *AST) addAfter(t *Token, i *item) *item {
 	it := &item{next: i.next, prev: i, value: t}
 	i.next = it
 	if it.next == nil {
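The AST here is a doubly linked list of lexed tokens, so parsed and skipped tokens alike stay in source order. A minimal sketch of how the merged package wires tokens into it (the token values are illustrative, the functions are the ones in this diff):

	ast := newAST()
	ft := ast.addTail(&Token{Type: FEATURE, Line: 1, Value: "gherkin lexer"})
	ast.addTail(&Token{Type: TEXT, Line: 2, Value: "in order to run features"})
	// addAfter and addBefore splice around an existing item in O(1),
	// e.g. re-attaching a comment token next to the feature:
	ast.addAfter(&Token{Type: COMMENT, Line: 1, Value: "lexing"}, ft)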

View File

@@ -2,11 +2,9 @@ package gherkin
 import (
 	"testing"
-	"github.com/DATA-DOG/godog/gherkin/lexer"
 )
-func (a *AST) assertMatchesTypes(expected []lexer.TokenType, t *testing.T) {
+func (a *AST) assertMatchesTypes(expected []TokenType, t *testing.T) {
 	key := -1
 	for item := a.head; item != nil; item = item.next {
 		key += 1

View File

@@ -3,8 +3,6 @@ package gherkin
 import (
 	"strings"
 	"testing"
-	"github.com/DATA-DOG/godog/gherkin/lexer"
 )
 var testFeatureSamples = map[string]string{
@@ -34,7 +32,7 @@ func (f *Feature) assertHasNumScenarios(n int, t *testing.T) {
 func Test_parse_normal_feature(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testFeatureSamples["feature"])),
+		lx:   newLexer(strings.NewReader(testFeatureSamples["feature"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -49,17 +47,17 @@ func Test_parse_normal_feature(t *testing.T) {
 		t.Fatalf("expected a feature description to be available")
 	}
-	ft.AST.assertMatchesTypes([]lexer.TokenType{
-		lexer.FEATURE,
-		lexer.TEXT,
-		lexer.TEXT,
-		lexer.TEXT,
+	ft.AST.assertMatchesTypes([]TokenType{
+		FEATURE,
+		TEXT,
+		TEXT,
+		TEXT,
 	}, t)
 }
 func Test_parse_feature_without_description(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testFeatureSamples["only_title"])),
+		lx:   newLexer(strings.NewReader(testFeatureSamples["only_title"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -74,14 +72,14 @@ func Test_parse_feature_without_description(t *testing.T) {
 		t.Fatalf("feature description was not expected")
 	}
-	ft.AST.assertMatchesTypes([]lexer.TokenType{
-		lexer.FEATURE,
+	ft.AST.assertMatchesTypes([]TokenType{
+		FEATURE,
 	}, t)
 }
 func Test_parse_empty_feature_file(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testFeatureSamples["empty"])),
+		lx:   newLexer(strings.NewReader(testFeatureSamples["empty"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -93,7 +91,7 @@ func Test_parse_empty_feature_file(t *testing.T) {
 func Test_parse_invalid_feature_with_random_text(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testFeatureSamples["invalid"])),
+		lx:   newLexer(strings.NewReader(testFeatureSamples["invalid"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -101,14 +99,14 @@ func Test_parse_invalid_feature_with_random_text(t *testing.T) {
 	if err == nil {
 		t.Fatalf("expected an error but got none")
 	}
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.TEXT,
+	p.ast.assertMatchesTypes([]TokenType{
+		TEXT,
 	}, t)
 }
 func Test_parse_feature_with_newlines(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testFeatureSamples["starts_with_newlines"])),
+		lx:   newLexer(strings.NewReader(testFeatureSamples["starts_with_newlines"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -123,9 +121,9 @@ func Test_parse_feature_with_newlines(t *testing.T) {
 		t.Fatalf("feature description was not expected")
 	}
-	ft.AST.assertMatchesTypes([]lexer.TokenType{
-		lexer.NEW_LINE,
-		lexer.NEW_LINE,
-		lexer.FEATURE,
+	ft.AST.assertMatchesTypes([]TokenType{
+		NEW_LINE,
+		NEW_LINE,
+		FEATURE,
 	}, t)
 }

View File

@@ -64,8 +64,6 @@ import (
 	"os"
 	"strings"
 	"unicode"
-	"github.com/DATA-DOG/godog/gherkin/lexer"
 )
 // Tag is gherkin feature or scenario tag.
@@ -142,12 +140,12 @@ type Table struct {
 	rows [][]string
 }
-var allSteps = []lexer.TokenType{
-	lexer.GIVEN,
-	lexer.WHEN,
-	lexer.THEN,
-	lexer.AND,
-	lexer.BUT,
+var allSteps = []TokenType{
+	GIVEN,
+	WHEN,
+	THEN,
+	AND,
+	BUT,
 }
 // ErrEmpty is returned in case if feature file
@@ -155,10 +153,10 @@ var allSteps = []lexer.TokenType{
 var ErrEmpty = errors.New("the feature file is empty")
 type parser struct {
-	lx     *lexer.Lexer
+	lx     *lexer
 	path   string
 	ast    *AST
-	peeked *lexer.Token
+	peeked *Token
 }
 // Parse the feature file on the given path into
@@ -172,15 +170,15 @@ func Parse(path string) (*Feature, error) {
 	defer file.Close()
 	return (&parser{
-		lx:   lexer.New(file),
+		lx:   newLexer(file),
 		path: path,
 		ast:  newAST(),
 	}).parseFeature()
 }
 // reads tokens into AST and skips comments or new lines
-func (p *parser) next() *lexer.Token {
-	if p.ast.tail != nil && p.ast.tail.value.Type == lexer.EOF {
+func (p *parser) next() *Token {
+	if p.ast.tail != nil && p.ast.tail.value.Type == EOF {
 		return p.ast.tail.value // has reached EOF, do not record it more than once
 	}
 	tok := p.peek()
@@ -190,12 +188,12 @@ func (p *parser) next() *lexer.Token {
 }
 // peaks into next token, skips comments or new lines
-func (p *parser) peek() *lexer.Token {
+func (p *parser) peek() *Token {
 	if p.peeked != nil {
 		return p.peeked
 	}
-	for p.peeked = p.lx.Next(); p.peeked.OfType(lexer.COMMENT, lexer.NEW_LINE); p.peeked = p.lx.Next() {
+	for p.peeked = p.lx.read(); p.peeked.OfType(COMMENT, NEW_LINE); p.peeked = p.lx.read() {
 		p.ast.addTail(p.peeked) // record comments and newlines
 	}
@@ -210,28 +208,28 @@ func (p *parser) parseFeature() (ft *Feature, err error) {
 	ft = &Feature{Path: p.path, AST: p.ast}
 	switch p.peek().Type {
-	case lexer.EOF:
+	case EOF:
 		return ft, ErrEmpty
-	case lexer.TAGS:
+	case TAGS:
 		ft.Tags = p.parseTags()
 	}
 	tok := p.next()
-	if tok.Type != lexer.FEATURE {
+	if tok.Type != FEATURE {
 		return ft, p.err("expected a file to begin with a feature definition, but got '"+tok.Type.String()+"' instead", tok.Line)
 	}
 	ft.Title = tok.Value
 	ft.Comment = tok.Comment
 	var desc []string
-	for ; p.peek().Type == lexer.TEXT; tok = p.next() {
+	for ; p.peek().Type == TEXT; tok = p.next() {
 		desc = append(desc, tok.Value)
 	}
 	ft.Description = strings.Join(desc, "\n")
-	for tok = p.peek(); tok.Type != lexer.EOF; tok = p.peek() {
+	for tok = p.peek(); tok.Type != EOF; tok = p.peek() {
 		// there may be a background
-		if tok.Type == lexer.BACKGROUND {
+		if tok.Type == BACKGROUND {
 			if ft.Background != nil {
 				return ft, p.err("there can only be a single background section, but found another", tok.Line)
 			}
@@ -247,7 +245,7 @@ func (p *parser) parseFeature() (ft *Feature, err error) {
 		// there may be tags before scenario
 		sc := &Scenario{}
 		sc.Tags = append(sc.Tags, ft.Tags...)
-		if tok.Type == lexer.TAGS {
+		if tok.Type == TAGS {
 			for _, t := range p.parseTags() {
 				if !sc.Tags.Has(t) {
 					sc.Tags = append(sc.Tags, t)
@@ -257,7 +255,7 @@ func (p *parser) parseFeature() (ft *Feature, err error) {
 		}
 		// there must be a scenario otherwise
-		if tok.Type != lexer.SCENARIO {
+		if tok.Type != SCENARIO {
 			return ft, p.err("expected a scenario, but got '"+tok.Type.String()+"' instead", tok.Line)
 		}
@@ -277,13 +275,13 @@ func (p *parser) parseSteps() (steps []*Step, err error) {
 	for tok := p.peek(); tok.OfType(allSteps...); tok = p.peek() {
 		step := &Step{Text: tok.Value, Comment: tok.Comment}
 		switch tok.Type {
-		case lexer.GIVEN:
+		case GIVEN:
 			step.Type = Given
-		case lexer.WHEN:
+		case WHEN:
 			step.Type = When
-		case lexer.THEN:
+		case THEN:
 			step.Type = Then
-		case lexer.AND, lexer.BUT:
+		case AND, BUT:
 			if len(steps) > 0 {
 				step.Type = steps[len(steps)-1].Type
 			} else {
@@ -295,11 +293,11 @@ func (p *parser) parseSteps() (steps []*Step, err error) {
 		if step.Text[len(step.Text)-1] == ':' {
 			tok = p.peek()
 			switch tok.Type {
-			case lexer.PYSTRING:
+			case PYSTRING:
 				if err := p.parsePystring(step); err != nil {
 					return steps, err
 				}
-			case lexer.TABLE_ROW:
+			case TABLE_ROW:
 				if err := p.parseTable(step); err != nil {
 					return steps, err
 				}
@@ -315,13 +313,13 @@ func (p *parser) parseSteps() (steps []*Step, err error) {
 }
 func (p *parser) parsePystring(s *Step) error {
-	var tok *lexer.Token
+	var tok *Token
 	started := p.next() // skip the start of pystring
 	var lines []string
-	for tok = p.next(); !tok.OfType(lexer.EOF, lexer.PYSTRING); tok = p.next() {
+	for tok = p.next(); !tok.OfType(EOF, PYSTRING); tok = p.next() {
 		lines = append(lines, tok.Text)
 	}
-	if tok.Type == lexer.EOF {
+	if tok.Type == EOF {
 		return fmt.Errorf("pystring which was opened on %s:%d was not closed", p.path, started.Line)
 	}
 	s.PyString = &PyString{Body: strings.Join(lines, "\n")}
@@ -330,7 +328,7 @@ func (p *parser) parsePystring(s *Step) error {
 func (p *parser) parseTable(s *Step) error {
 	s.Table = &Table{}
-	for row := p.peek(); row.Type == lexer.TABLE_ROW; row = p.peek() {
+	for row := p.peek(); row.Type == TABLE_ROW; row = p.peek() {
 		var cols []string
 		for _, r := range strings.Split(strings.Trim(row.Value, "|"), "|") {
 			cols = append(cols, strings.TrimFunc(r, unicode.IsSpace))
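The exported surface is unchanged by the merge: callers still go through Parse. A minimal usage sketch (the feature path and error handling are illustrative, not from this commit):

	package main

	import (
		"fmt"
		"log"

		"github.com/DATA-DOG/godog/gherkin"
	)

	func main() {
		// Parse reads and lexes the feature file in one pass, now that
		// the lexer lives in the same package
		ft, err := gherkin.Parse("features/example.feature")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(ft.Title)
	}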

View File

@@ -1,24 +1,36 @@
-package lexer
+package gherkin
 import (
 	"bufio"
 	"io"
+	"regexp"
 	"strings"
 	"unicode"
 )
-type Lexer struct {
+var matchers = map[string]*regexp.Regexp{
+	"feature":    regexp.MustCompile("^(\\s*)Feature:\\s*([^#]*)(#.*)?"),
+	"scenario":   regexp.MustCompile("^(\\s*)Scenario:\\s*([^#]*)(#.*)?"),
+	"background": regexp.MustCompile("^(\\s*)Background:(\\s*#.*)?"),
+	"step":       regexp.MustCompile("^(\\s*)(Given|When|Then|And|But)\\s+([^#]*)(#.*)?"),
+	"comment":    regexp.MustCompile("^(\\s*)#(.+)"),
+	"pystring":   regexp.MustCompile("^(\\s*)\\\"\\\"\\\""),
+	"tags":       regexp.MustCompile("^(\\s*)@([^#]*)(#.*)?"),
+	"table_row":  regexp.MustCompile("^(\\s*)\\|([^#]*)(#.*)?"),
+}
+type lexer struct {
 	reader *bufio.Reader
 	lines  int
 }
-func New(r io.Reader) *Lexer {
-	return &Lexer{
+func newLexer(r io.Reader) *lexer {
+	return &lexer{
 		reader: bufio.NewReader(r),
 	}
}
-func (l *Lexer) Next() *Token {
+func (l *lexer) read() *Token {
 	line, err := l.reader.ReadString(byte('\n'))
 	if err != nil && len(line) == 0 {
 		return &Token{
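Since the lexer type and its read method are now unexported, they are reachable only from inside the gherkin package, as the tests below show. A sketch of the read loop, with made-up input:

	l := newLexer(strings.NewReader("Feature: minimal\n  description line"))
	for tok := l.read(); tok.Type != EOF; tok = l.read() {
		// each call classifies one raw line into a Token
		fmt.Println(tok.Type, tok.Value)
	}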

View File

@@ -1,14 +0,0 @@
-package lexer
-import "regexp"
-var matchers = map[string]*regexp.Regexp{
-	"feature":    regexp.MustCompile("^(\\s*)Feature:\\s*([^#]*)(#.*)?"),
-	"scenario":   regexp.MustCompile("^(\\s*)Scenario:\\s*([^#]*)(#.*)?"),
-	"background": regexp.MustCompile("^(\\s*)Background:(\\s*#.*)?"),
-	"step":       regexp.MustCompile("^(\\s*)(Given|When|Then|And|But)\\s+([^#]*)(#.*)?"),
-	"comment":    regexp.MustCompile("^(\\s*)#(.+)"),
-	"pystring":   regexp.MustCompile("^(\\s*)\\\"\\\"\\\""),
-	"tags":       regexp.MustCompile("^(\\s*)@([^#]*)(#.*)?"),
-	"table_row":  regexp.MustCompile("^(\\s*)\\|([^#]*)(#.*)?"),
-}
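The matchers map moves verbatim into lexer.go above. For reference, this is how the "step" pattern decomposes a line into its capture groups (the input line is made up):

	// inside package gherkin
	m := matchers["step"].FindStringSubmatch("  Given I am logged in # as admin")
	// m[1] == "  "              leading indentation
	// m[2] == "Given"           step keyword
	// m[3] == "I am logged in " step text, up to a trailing comment
	// m[4] == "# as admin"      optional comment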

View File

@@ -1,18 +0,0 @@
-package lexer
-type Token struct {
-	Type         TokenType // type of token
-	Line, Indent int       // line and indentation number
-	Value        string    // interpreted value
-	Text         string    // same text as read
-	Comment      string    // a comment
-}
-func (t *Token) OfType(all ...TokenType) bool {
-	for _, typ := range all {
-		if typ == t.Type {
-			return true
-		}
-	}
-	return false
-}

View File

@@ -1,11 +1,11 @@
-package lexer
+package gherkin
 import (
 	"strings"
 	"testing"
 )
-var samples = map[string]string{
+var lexerSamples = map[string]string{
 	"feature": `Feature: gherkin lexer
   in order to run features
   as gherkin lexer
@@ -29,13 +29,9 @@ var samples = map[string]string{
 	| John | Doe | 79 |`,
 }
-func indent(n int, s string) string {
-	return strings.Repeat(" ", n) + s
-}
 func Test_feature_read(t *testing.T) {
-	l := New(strings.NewReader(samples["feature"]))
-	tok := l.Next()
+	l := newLexer(strings.NewReader(lexerSamples["feature"]))
+	tok := l.read()
 	if tok.Type != FEATURE {
 		t.Fatalf("Expected a 'feature' type, but got: '%s'", tok.Type)
 	}
@@ -50,7 +46,7 @@ func Test_feature_read(t *testing.T) {
 		t.Fatalf("Expected a token identation to be '0', but got: '%d'", tok.Indent)
 	}
-	tok = l.Next()
+	tok = l.read()
 	if tok.Type != TEXT {
 		t.Fatalf("Expected a 'text' type, but got: '%s'", tok.Type)
 	}
@@ -65,7 +61,7 @@ func Test_feature_read(t *testing.T) {
 		t.Fatalf("Expected a token identation to be '2', but got: '%d'", tok.Indent)
 	}
-	tok = l.Next()
+	tok = l.read()
 	if tok.Type != TEXT {
 		t.Fatalf("Expected a 'text' type, but got: '%s'", tok.Type)
 	}
@@ -80,7 +76,7 @@ func Test_feature_read(t *testing.T) {
 		t.Fatalf("Expected a token identation to be '2', but got: '%d'", tok.Indent)
 	}
-	tok = l.Next()
+	tok = l.read()
 	if tok.Type != TEXT {
 		t.Fatalf("Expected a 'text' type, but got: '%s'", tok.Type)
 	}
@@ -95,7 +91,7 @@ func Test_feature_read(t *testing.T) {
 		t.Fatalf("Expected a token identation to be '2', but got: '%d'", tok.Indent)
 	}
-	tok = l.Next()
+	tok = l.read()
 	if tok.Type != EOF {
 		t.Fatalf("Expected an 'eof' type, but got: '%s'", tok.Type)
 	}
@@ -103,21 +99,21 @@ func Test_feature_read(t *testing.T) {
 func Test_minimal_feature(t *testing.T) {
 	file := strings.Join([]string{
-		samples["feature"] + "\n",
-		indent(2, samples["background"]),
-		indent(4, samples["step_given"]) + "\n",
-		indent(2, samples["comment"]),
-		indent(2, samples["scenario"]),
-		indent(4, samples["step_given"]),
-		indent(4, samples["step_when"]),
-		indent(4, samples["step_then"]),
+		lexerSamples["feature"] + "\n",
+		indent(2, lexerSamples["background"]),
+		indent(4, lexerSamples["step_given"]) + "\n",
+		indent(2, lexerSamples["comment"]),
+		indent(2, lexerSamples["scenario"]),
+		indent(4, lexerSamples["step_given"]),
+		indent(4, lexerSamples["step_when"]),
+		indent(4, lexerSamples["step_then"]),
 	}, "\n")
-	l := New(strings.NewReader(file))
+	l := newLexer(strings.NewReader(file))
 	var tokens []TokenType
-	for tok := l.Next(); tok.Type != EOF; tok = l.Next() {
+	for tok := l.read(); tok.Type != EOF; tok = l.read() {
 		tokens = append(tokens, tok.Type)
 	}
 	expected := []TokenType{
@@ -146,16 +142,16 @@ func Test_minimal_feature(t *testing.T) {
 func Test_table_row_reading(t *testing.T) {
 	file := strings.Join([]string{
-		indent(2, samples["background"]),
-		indent(4, samples["step_given_table"]),
-		indent(4, samples["step_given"]),
+		indent(2, lexerSamples["background"]),
+		indent(4, lexerSamples["step_given_table"]),
+		indent(4, lexerSamples["step_given"]),
 	}, "\n")
-	l := New(strings.NewReader(file))
+	l := newLexer(strings.NewReader(file))
 	var types []TokenType
 	var values []string
 	var indents []int
-	for tok := l.Next(); tok.Type != EOF; tok = l.Next() {
+	for tok := l.read(); tok.Type != EOF; tok = l.read() {
 		types = append(types, tok.Type)
 		values = append(values, tok.Value)
 		indents = append(indents, tok.Indent)

View File

@@ -3,8 +3,6 @@ package gherkin
 import (
 	"strings"
 	"testing"
-	"github.com/DATA-DOG/godog/gherkin/lexer"
 )
 func (s *Scenario) assertHasTag(tag string, t *testing.T) {
@@ -41,7 +39,7 @@ func Test_parse_feature_file(t *testing.T) {
 	}, "\n")
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(content)),
+		lx:   newLexer(strings.NewReader(content)),
 		path: "usual.feature",
 		ast:  newAST(),
 	}
@@ -51,36 +49,36 @@
 	}
 	ft.assertTitle("gherkin parser", t)
-	ft.AST.assertMatchesTypes([]lexer.TokenType{
-		lexer.TAGS,
-		lexer.FEATURE,
-		lexer.TEXT,
-		lexer.TEXT,
-		lexer.TEXT,
-		lexer.NEW_LINE,
-		lexer.BACKGROUND,
-		lexer.GIVEN,
-		lexer.TABLE_ROW,
-		lexer.NEW_LINE,
-		lexer.SCENARIO,
-		lexer.GIVEN,
-		lexer.AND,
-		lexer.WHEN,
-		lexer.THEN,
-		lexer.NEW_LINE,
-		lexer.TAGS,
-		lexer.SCENARIO,
-		lexer.GIVEN,
-		lexer.AND,
-		lexer.WHEN,
-		lexer.THEN,
-		lexer.NEW_LINE,
-		lexer.TAGS,
-		lexer.SCENARIO,
+	ft.AST.assertMatchesTypes([]TokenType{
+		TAGS,
+		FEATURE,
+		TEXT,
+		TEXT,
+		TEXT,
+		NEW_LINE,
+		BACKGROUND,
+		GIVEN,
+		TABLE_ROW,
+		NEW_LINE,
+		SCENARIO,
+		GIVEN,
+		AND,
+		WHEN,
+		THEN,
+		NEW_LINE,
+		TAGS,
+		SCENARIO,
+		GIVEN,
+		AND,
+		WHEN,
+		THEN,
+		NEW_LINE,
+		TAGS,
+		SCENARIO,
 	}, t)
 	ft.assertHasNumScenarios(3, t)

View File

@@ -3,8 +3,6 @@ package gherkin
 import (
 	"strings"
 	"testing"
-	"github.com/DATA-DOG/godog/gherkin/lexer"
 )
 var testStepSamples = map[string]string{
@@ -92,7 +90,7 @@ func (s *Step) assertTableRow(t *testing.T, num int, cols ...string) {
 func Test_parse_basic_given_step(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["given"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["given"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -108,15 +106,15 @@ func Test_parse_basic_given_step(t *testing.T) {
 	steps[0].assertText("I'm a step", t)
 	p.next() // step over to eof
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.GIVEN,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		GIVEN,
+		EOF,
 	}, t)
 }
 func Test_parse_step_with_comment(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["step_comment"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["step_comment"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -133,15 +131,15 @@ func Test_parse_step_with_comment(t *testing.T) {
 	steps[0].assertComment("sets admin permissions", t)
 	p.next() // step over to eof
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.GIVEN,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		GIVEN,
+		EOF,
 	}, t)
 }
 func Test_parse_hash_table_given_step(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["given_table_hash"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["given_table_hash"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -158,16 +156,16 @@ func Test_parse_hash_table_given_step(t *testing.T) {
 	steps[0].assertTableRow(t, 0, "name", "John Doe")
 	p.next() // step over to eof
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.GIVEN,
-		lexer.TABLE_ROW,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		GIVEN,
+		TABLE_ROW,
+		EOF,
 	}, t)
 }
 func Test_parse_table_given_step(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["given_table"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["given_table"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -186,18 +184,18 @@ func Test_parse_table_given_step(t *testing.T) {
 	steps[0].assertTableRow(t, 2, "Jane", "Doe")
 	p.next() // step over to eof
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.GIVEN,
-		lexer.TABLE_ROW,
-		lexer.TABLE_ROW,
-		lexer.TABLE_ROW,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		GIVEN,
+		TABLE_ROW,
+		TABLE_ROW,
+		TABLE_ROW,
+		EOF,
 	}, t)
 }
 func Test_parse_pystring_step(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["then_pystring"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["then_pystring"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -217,19 +215,19 @@ func Test_parse_pystring_step(t *testing.T) {
 	}, "\n"), t)
 	p.next() // step over to eof
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.THEN,
-		lexer.PYSTRING,
-		lexer.TEXT,
-		lexer.AND, // we do not care what we parse inside PYSTRING even if its whole behat feature text
-		lexer.PYSTRING,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		THEN,
+		PYSTRING,
+		TEXT,
+		AND, // we do not care what we parse inside PYSTRING even if its whole behat feature text
+		PYSTRING,
+		EOF,
 	}, t)
 }
 func Test_parse_empty_pystring_step(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["when_pystring_empty"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["when_pystring_empty"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -246,17 +244,17 @@ func Test_parse_empty_pystring_step(t *testing.T) {
 	steps[0].assertPyString("", t)
 	p.next() // step over to eof
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.WHEN,
-		lexer.PYSTRING,
-		lexer.PYSTRING,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		WHEN,
+		PYSTRING,
+		PYSTRING,
+		EOF,
 	}, t)
 }
 func Test_parse_unclosed_pystring_step(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["when_pystring_unclosed"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["when_pystring_unclosed"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -264,18 +262,18 @@ func Test_parse_unclosed_pystring_step(t *testing.T) {
 	if err == nil {
 		t.Fatalf("expected an error, but got none")
 	}
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.WHEN,
-		lexer.PYSTRING,
-		lexer.TEXT,
-		lexer.TEXT,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		WHEN,
+		PYSTRING,
+		TEXT,
+		TEXT,
+		EOF,
 	}, t)
 }
 func Test_parse_step_group(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["step_group"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["step_group"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -297,18 +295,18 @@ func Test_parse_step_group(t *testing.T) {
 	steps[3].assertText("something should happen", t)
 	p.next() // step over to eof
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.GIVEN,
-		lexer.AND,
-		lexer.WHEN,
-		lexer.THEN,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		GIVEN,
+		AND,
+		WHEN,
+		THEN,
+		EOF,
 	}, t)
 }
 func Test_parse_another_step_group(t *testing.T) {
 	p := &parser{
-		lx:   lexer.New(strings.NewReader(testStepSamples["step_group_another"])),
+		lx:   newLexer(strings.NewReader(testStepSamples["step_group_another"])),
 		path: "some.feature",
 		ast:  newAST(),
 	}
@@ -330,11 +328,11 @@ func Test_parse_another_step_group(t *testing.T) {
 	steps[3].assertText("I expect the result", t)
 	p.next() // step over to eof
-	p.ast.assertMatchesTypes([]lexer.TokenType{
-		lexer.GIVEN,
-		lexer.AND,
-		lexer.WHEN,
-		lexer.THEN,
-		lexer.EOF,
+	p.ast.assertMatchesTypes([]TokenType{
+		GIVEN,
+		AND,
+		WHEN,
+		THEN,
+		EOF,
 	}, t)
 }

View File

@@ -1,4 +1,4 @@
-package lexer
+package gherkin
 type TokenType int
@@ -64,3 +64,20 @@ func (t TokenType) String() string {
 	}
 	return "illegal"
 }
+type Token struct {
+	Type         TokenType // type of token
+	Line, Indent int       // line and indentation number
+	Value        string    // interpreted value
+	Text         string    // same text as read
+	Comment      string    // a comment
+}
+func (t *Token) OfType(all ...TokenType) bool {
+	for _, typ := range all {
+		if typ == t.Type {
+			return true
+		}
+	}
+	return false
+}
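OfType is what lets the parser test a token against several step keywords at once, as in tok.OfType(allSteps...) inside parseSteps. A small illustrative check with made-up values:

	tok := &Token{Type: AND, Line: 7, Value: "the user is logged in"}
	if tok.OfType(GIVEN, WHEN, THEN, AND, BUT) {
		// the token is one of the step keywords
	}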