mirror of
https://github.com/golang/go
synced 2024-10-04 20:21:22 -06:00
5a9ad8b9e3
Specifically: * lib/godoc: - provide file set (FSet) argument to formatters where needed * src/cmd: - cgo, ebnflint, godoc, gofmt, goinstall: provide file set (fset) where needed - godoc: remove local binary search with sort.Search (change by rsc), extract file set for formatters * src/pkg: - exp/eval: remove embedded token.Position fields from nodes and replace with named token.Pos fields; add corresponding Pos() accessor methods - go/token: added file.Line(), changed signature of File.Position() * test/fixedbugs/: - bug206.go: change test to not rely on token.Pos details * added various extra comments * Runs all.bash * gofmt formats all of src, misc w/o changes * godoc runs * performance: - The new version of godoc consumes about the same space after indexing has completed, but indexing is half the speed. Significant space savings are expected from smaller ASTs, but since they are thrown away after a file has been indexed, this is not visible anymore. The slower indexing time is due to the much more expensive computation of line information. However, with the new compressed position information, indexing can be rewritten and simplified. Furthermore, computing the line info can be done more efficiently. 
New godoc, immediately after indexing completed (best of three runs): PID COMMAND %CPU TIME #TH #PRTS #MREGS RPRVT RSHRD RSIZE VSIZE 44381 godoc 0.0% 0:38.00 4 19 149 145M 184K 148M 176M 2010/12/03 17:58:35 index updated (39.231s, 18505 unique words, 386387 spots) 2010/12/03 17:58:35 bytes=90858456 footprint=199182584 2010/12/03 17:58:36 bytes=47858568 footprint=167295224 Old godoc, immediately after indexing completed (best of three runs): PID COMMAND %CPU TIME #TH #PRTS #MREGS RPRVT RSHRD RSIZE VSIZE 23167 godoc 0.0% 0:22.02 4 17 132 129M 184K 132M 173M 2010/12/03 14:51:32 index updated (24.892s, 18765 unique words, 393830 spots) 2010/12/03 14:51:32 bytes=66404528 footprint=163907832 2010/12/03 14:51:32 bytes=46282224 footprint=163907832 The different numbers for unique words/spots stem from the fact that the two workspaces are not exactly identical. The new godoc maintains a large file set data structure during indexing which (probably) is the reason for the larger heap (90858456 vs 66404528) before garbage collection. R=rsc, r CC=golang-dev https://golang.org/cl/3050041
249 lines
6.3 KiB
Go
249 lines
6.3 KiB
Go
// Copyright 2009 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
// A library for EBNF grammars. The input is text ([]byte) satisfying
|
|
// the following grammar (represented itself in EBNF):
|
|
//
|
|
// Production = name "=" Expression "." .
|
|
// Expression = Alternative { "|" Alternative } .
|
|
// Alternative = Term { Term } .
|
|
// Term = name | token [ "..." token ] | Group | Option | Repetition .
|
|
// Group = "(" Expression ")" .
|
|
// Option = "[" Expression "]" .
|
|
// Repetition = "{" Expression "}" .
|
|
//
|
|
// A name is a Go identifier, a token is a Go string, and comments
|
|
// and white space follow the same rules as for the Go language.
|
|
// Production names starting with an uppercase Unicode letter denote
|
|
// non-terminal productions (i.e., productions which allow white-space
|
|
// and comments between tokens); all other production names denote
|
|
// lexical productions.
|
|
//
|
|
package ebnf
|
|
|
|
import (
|
|
"go/scanner"
|
|
"go/token"
|
|
"os"
|
|
"unicode"
|
|
"utf8"
|
|
)
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
|
// Internal representation
|
|
|
|
type (
	// An Expression node represents a production expression.
	Expression interface {
		// Pos is the position of the first character of the syntactic construct
		Pos() token.Pos
	}

	// An Alternative node represents a non-empty list of alternative expressions.
	Alternative []Expression // x | y | z

	// A Sequence node represents a non-empty list of sequential expressions.
	Sequence []Expression // x y z

	// A Name node represents a production name.
	Name struct {
		StringPos token.Pos
		String    string
	}

	// A Token node represents a literal.
	Token struct {
		StringPos token.Pos
		String    string
	}

	// A Range node represents a range of characters.
	Range struct {
		Begin, End *Token // begin ... end
	}

	// A Group node represents a grouped expression.
	Group struct {
		Lparen token.Pos
		Body   Expression // (body)
	}

	// An Option node represents an optional expression.
	Option struct {
		Lbrack token.Pos
		Body   Expression // [body]
	}

	// A Repetition node represents a repeated expression.
	Repetition struct {
		Lbrace token.Pos
		Body   Expression // {body}
	}

	// A Production node represents an EBNF production.
	Production struct {
		Name *Name
		Expr Expression
	}

	// A Grammar is a set of EBNF productions. The map
	// is indexed by production name.
	//
	Grammar map[string]*Production
)
|
|
|
|
|
|
// Pos implementations for the expression nodes; each returns the
// position of the first character of its syntactic construct.

func (x Alternative) Pos() token.Pos { return x[0].Pos() } // the parser always generates non-empty Alternative
func (x Sequence) Pos() token.Pos    { return x[0].Pos() } // the parser always generates non-empty Sequences
func (x *Name) Pos() token.Pos       { return x.StringPos }
func (x *Token) Pos() token.Pos      { return x.StringPos }
func (x *Range) Pos() token.Pos      { return x.Begin.Pos() }
func (x *Group) Pos() token.Pos      { return x.Lparen }
func (x *Option) Pos() token.Pos     { return x.Lbrack }
func (x *Repetition) Pos() token.Pos { return x.Lbrace }
func (x *Production) Pos() token.Pos { return x.Name.Pos() }
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
|
// Grammar verification
|
|
|
|
// isLexical reports whether the production with the given name is a
// lexical production: any name that does not start with an uppercase
// Unicode letter denotes a lexical production.
func isLexical(name string) bool {
	first, _ := utf8.DecodeRuneInString(name)
	return !unicode.IsUpper(first)
}
|
|
|
|
|
|
// A verifier holds the state used while checking a grammar:
// the file set for resolving positions, the accumulated errors,
// and the worklist of productions still to be examined.
type verifier struct {
	fset *token.FileSet
	scanner.ErrorVector          // accumulates verification errors
	worklist []*Production       // productions reached but not yet verified
	reached  Grammar             // set of productions reached from (and including) the root production
	grammar  Grammar             // the grammar being verified
}
|
|
|
|
|
|
// error records msg as a verification error, resolving pos
// against the verifier's file set.
func (v *verifier) error(pos token.Pos, msg string) {
	v.Error(v.fset.Position(pos), msg)
}
|
|
|
|
|
|
func (v *verifier) push(prod *Production) {
|
|
name := prod.Name.String
|
|
if _, found := v.reached[name]; !found {
|
|
v.worklist = append(v.worklist, prod)
|
|
v.reached[name] = prod
|
|
}
|
|
}
|
|
|
|
|
|
func (v *verifier) verifyChar(x *Token) int {
|
|
s := x.String
|
|
if utf8.RuneCountInString(s) != 1 {
|
|
v.error(x.Pos(), "single char expected, found "+s)
|
|
return 0
|
|
}
|
|
ch, _ := utf8.DecodeRuneInString(s)
|
|
return ch
|
|
}
|
|
|
|
|
|
func (v *verifier) verifyExpr(expr Expression, lexical bool) {
|
|
switch x := expr.(type) {
|
|
case nil:
|
|
// empty expression
|
|
case Alternative:
|
|
for _, e := range x {
|
|
v.verifyExpr(e, lexical)
|
|
}
|
|
case Sequence:
|
|
for _, e := range x {
|
|
v.verifyExpr(e, lexical)
|
|
}
|
|
case *Name:
|
|
// a production with this name must exist;
|
|
// add it to the worklist if not yet processed
|
|
if prod, found := v.grammar[x.String]; found {
|
|
v.push(prod)
|
|
} else {
|
|
v.error(x.Pos(), "missing production "+x.String)
|
|
}
|
|
// within a lexical production references
|
|
// to non-lexical productions are invalid
|
|
if lexical && !isLexical(x.String) {
|
|
v.error(x.Pos(), "reference to non-lexical production "+x.String)
|
|
}
|
|
case *Token:
|
|
// nothing to do for now
|
|
case *Range:
|
|
i := v.verifyChar(x.Begin)
|
|
j := v.verifyChar(x.End)
|
|
if i >= j {
|
|
v.error(x.Pos(), "decreasing character range")
|
|
}
|
|
case *Group:
|
|
v.verifyExpr(x.Body, lexical)
|
|
case *Option:
|
|
v.verifyExpr(x.Body, lexical)
|
|
case *Repetition:
|
|
v.verifyExpr(x.Body, lexical)
|
|
default:
|
|
panic("unreachable")
|
|
}
|
|
}
|
|
|
|
|
|
func (v *verifier) verify(fset *token.FileSet, grammar Grammar, start string) {
|
|
// find root production
|
|
root, found := grammar[start]
|
|
if !found {
|
|
// token.NoPos doesn't require a file set;
|
|
// ok to set v.fset only afterwards
|
|
v.error(token.NoPos, "no start production "+start)
|
|
return
|
|
}
|
|
|
|
// initialize verifier
|
|
v.fset = fset
|
|
v.ErrorVector.Reset()
|
|
v.worklist = v.worklist[0:0]
|
|
v.reached = make(Grammar)
|
|
v.grammar = grammar
|
|
|
|
// work through the worklist
|
|
v.push(root)
|
|
for {
|
|
n := len(v.worklist) - 1
|
|
if n < 0 {
|
|
break
|
|
}
|
|
prod := v.worklist[n]
|
|
v.worklist = v.worklist[0:n]
|
|
v.verifyExpr(prod.Expr, isLexical(prod.Name.String))
|
|
}
|
|
|
|
// check if all productions were reached
|
|
if len(v.reached) < len(v.grammar) {
|
|
for name, prod := range v.grammar {
|
|
if _, found := v.reached[name]; !found {
|
|
v.error(prod.Pos(), name+" is unreachable")
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
// Verify checks that:
|
|
// - all productions used are defined
|
|
// - all productions defined are used when beginning at start
|
|
// - lexical productions refer only to other lexical productions
|
|
//
|
|
// Position information is interpreted relative to the file set fset.
|
|
//
|
|
func Verify(fset *token.FileSet, grammar Grammar, start string) os.Error {
|
|
var v verifier
|
|
v.verify(fset, grammar, start)
|
|
return v.GetError(scanner.Sorted)
|
|
}
|