1
0
mirror of https://github.com/golang/go synced 2024-11-25 05:57:57 -07:00

go/scanner: remove Tokenize() - was only used in tests

R=r
CC=golang-dev
https://golang.org/cl/3415042
This commit is contained in:
Robert Griesemer 2010-12-07 10:08:00 -08:00
parent 6aa85d1cbe
commit 14eb03f614
2 changed files with 60 additions and 59 deletions

View File

@ -4,7 +4,18 @@
// A scanner for Go source text. Takes a []byte as source which can // A scanner for Go source text. Takes a []byte as source which can
// then be tokenized through repeated calls to the Scan function. // then be tokenized through repeated calls to the Scan function.
// For a sample use of a scanner, see the implementation of Tokenize. // Typical use:
//
// var s Scanner
// fset := token.NewFileSet() // position information is relative to fset
// s.Init(fset, filename, src, nil /* no error handler */, 0)
// for {
// pos, tok, lit := s.Scan()
// if tok == token.EOF {
// break
// }
// // do something here with pos, tok, and lit
// }
// //
package scanner package scanner
@ -19,8 +30,7 @@ import (
// A Scanner holds the scanner's internal state while processing // A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data // a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use. For // structure but must be initialized via Init before use.
// a sample use, see the implementation of Tokenize.
// //
type Scanner struct { type Scanner struct {
// immutable state // immutable state
@ -692,19 +702,3 @@ scanAgain:
} }
return S.file.Pos(offs), tok, S.src[offs:S.offset] return S.file.Pos(offs), tok, S.src[offs:S.offset]
} }
// Tokenize calls a function f with the token position, token value, and token
// text for each token in the source src. The other parameters have the same
// meaning as for the Init function. Tokenize keeps scanning until f returns
// false (usually when the token value is token.EOF). The result is the number
// of errors encountered.
//
func Tokenize(set *token.FileSet, filename string, src []byte, err ErrorHandler, mode uint, f func(pos token.Pos, tok token.Token, lit []byte) bool) int {
	var s Scanner
	s.Init(set, filename, src, err, mode)
	for {
		// Feed each scanned token to f; stop when f signals completion.
		if !f(s.Scan()) {
			break
		}
	}
	return s.ErrorCount
}

View File

@ -227,42 +227,46 @@ func TestScan(t *testing.T) {
whitespace_linecount := newlineCount(whitespace) whitespace_linecount := newlineCount(whitespace)
// verify scan // verify scan
var s Scanner
s.Init(fset, "", []byte(src), &testErrorHandler{t}, ScanComments)
index := 0 index := 0
epos := token.Position{"", 0, 1, 1} // expected position epos := token.Position{"", 0, 1, 1} // expected position
nerrors := Tokenize(fset, "", []byte(src), &testErrorHandler{t}, ScanComments, for {
func(pos token.Pos, tok token.Token, litb []byte) bool { pos, tok, litb := s.Scan()
e := elt{token.EOF, "", special} e := elt{token.EOF, "", special}
if index < len(tokens) { if index < len(tokens) {
e = tokens[index] e = tokens[index]
} }
lit := string(litb) lit := string(litb)
if tok == token.EOF { if tok == token.EOF {
lit = "<EOF>" lit = "<EOF>"
epos.Line = src_linecount epos.Line = src_linecount
epos.Column = 1 epos.Column = 1
} }
checkPos(t, lit, pos, epos) checkPos(t, lit, pos, epos)
if tok != e.tok { if tok != e.tok {
t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String()) t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
} }
if e.tok.IsLiteral() && lit != e.lit { if e.tok.IsLiteral() && lit != e.lit {
t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit) t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
} }
if tokenclass(tok) != e.class { if tokenclass(tok) != e.class {
t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class) t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
} }
epos.Offset += len(lit) + len(whitespace) epos.Offset += len(lit) + len(whitespace)
epos.Line += newlineCount(lit) + whitespace_linecount epos.Line += newlineCount(lit) + whitespace_linecount
if tok == token.COMMENT && litb[1] == '/' { if tok == token.COMMENT && litb[1] == '/' {
// correct for unaccounted '\n' in //-style comment // correct for unaccounted '\n' in //-style comment
epos.Offset++ epos.Offset++
epos.Line++ epos.Line++
} }
index++ index++
return tok != token.EOF if tok == token.EOF {
}) break
if nerrors != 0 { }
t.Errorf("found %d errors", nerrors) }
if s.ErrorCount != 0 {
t.Errorf("found %d errors", s.ErrorCount)
} }
} }
@ -551,10 +555,13 @@ func TestStdErrorHander(t *testing.T) {
"@ @ @" // original file, line 1 again "@ @ @" // original file, line 1 again
v := new(ErrorVector) v := new(ErrorVector)
nerrors := Tokenize(fset, "File1", []byte(src), v, 0, var s Scanner
func(pos token.Pos, tok token.Token, litb []byte) bool { s.Init(fset, "File1", []byte(src), v, 0)
return tok != token.EOF for {
}) if _, tok, _ := s.Scan(); tok == token.EOF {
break
}
}
list := v.GetErrorList(Raw) list := v.GetErrorList(Raw)
if len(list) != 9 { if len(list) != 9 {
@ -574,8 +581,8 @@ func TestStdErrorHander(t *testing.T) {
PrintError(os.Stderr, list) PrintError(os.Stderr, list)
} }
if v.ErrorCount() != nerrors { if v.ErrorCount() != s.ErrorCount {
t.Errorf("found %d errors, expected %d", v.ErrorCount(), nerrors) t.Errorf("found %d errors, expected %d", v.ErrorCount(), s.ErrorCount)
} }
} }