
html: parse doctype tokens; merge adjacent text nodes.

The test case input is "<!DOCTYPE html><span><button>foo</span>bar".
The correct parse is:
| <!DOCTYPE html>
| <html>
|   <head>
|   <body>
|     <span>
|       <button>
|         "foobar"
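
With both halves of the change in place, a token-level walk over that input yields a doctype token, two start tags, "foo", an end tag and "bar"; the tree builder then merges the two text tokens into the single "foobar" node shown above. A sketch of such a walk against the pre-Go 1 API in this patch (the plain "html" import path is an assumption of the sketch):

    package main

    import (
        "fmt"
        "html" // assumed import path for the package patched here
        "strings"
    )

    func main() {
        z := html.NewTokenizer(strings.NewReader(
            "<!DOCTYPE html><span><button>foo</span>bar"))
        for {
            if z.Next() == html.ErrorToken {
                return // os.EOF ends the stream after the final "bar" text token
            }
            // Prints: <!DOCTYPE html>, <span>, <button>, foo, </span>, bar.
            fmt.Println(z.Token())
        }
    }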

R=gri
CC=golang-dev
https://golang.org/cl/4794063
Nigel Tao, 2011-08-01 10:26:46 +10:00
parent 4d2766e994, commit 1d0c141d7d
4 changed files with 170 additions and 102 deletions

View File

@@ -13,6 +13,7 @@ const (
 	DocumentNode
 	ElementNode
 	CommentNode
+	DoctypeNode
 	scopeMarkerNode
 )

View File

@@ -81,8 +81,8 @@ func (p *parser) popUntil(stopTags []string, matchTags ...string) bool {
 	return false
 }
 
-// addChild adds a child node n to the top element, and pushes n if it is an
-// element node (text nodes are not part of the stack of open elements).
+// addChild adds a child node n to the top element, and pushes n onto the stack
+// of open elements if it is an element node.
 func (p *parser) addChild(n *Node) {
 	p.top().Add(n)
 	if n.Type == ElementNode {
@@ -90,10 +90,15 @@ func (p *parser) addChild(n *Node) {
 	}
 }
 
-// addText calls addChild with a text node.
+// addText adds text to the preceding node if it is a text node, or else it
+// calls addChild with a new text node.
 func (p *parser) addText(text string) {
-	// TODO: merge s with previous text, if the preceding node is a text node.
 	// TODO: distinguish whitespace text from others.
+	t := p.top()
+	if i := len(t.Child); i > 0 && t.Child[i-1].Type == TextNode {
+		t.Child[i-1].Data += text
+		return
+	}
 	p.addChild(&Node{
 		Type: TextNode,
 		Data: text,
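
This is the "merge adjacent text nodes" half of the commit: in the test input, the stray "</span>" splits "foo" and "bar" into two addText calls, and without the merge they would dump as two sibling text nodes instead of the expected "foobar". A minimal standalone sketch of the rule, reusing the patch's Node, TextNode and Add names (illustrative only; the real method above also routes through addChild):

    // appendText coalesces text into the last child if that child is a text
    // node; otherwise it appends a fresh text node.
    func appendText(parent *Node, text string) {
        if n := len(parent.Child); n > 0 && parent.Child[n-1].Type == TextNode {
            parent.Child[n-1].Data += text
            return
        }
        parent.Add(&Node{Type: TextNode, Data: text})
    }
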
@@ -201,7 +206,15 @@ func useTheRulesFor(p *parser, actual, delegate insertionMode) (insertionMode, bool) {
 
 // Section 11.2.5.4.1.
 func initialIM(p *parser) (insertionMode, bool) {
-	// TODO: check p.tok for DOCTYPE.
+	if p.tok.Type == DoctypeToken {
+		p.addChild(&Node{
+			Type: DoctypeNode,
+			Data: p.tok.Data,
+		})
+		return beforeHTMLIM, true
+	}
+	// TODO: set "quirks mode"? It's defined in the DOM spec instead of HTML5 proper,
+	// and so switching on "quirks mode" might belong in a different package.
 	return beforeHTMLIM, false
 }
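
The boolean in the return value reports whether the current token was consumed: the doctype branch returns true (the token is handled and the parser moves to beforeHTMLIM), while the fallback returns false so the same token is reprocessed in the next mode. A hypothetical sketch of a driver loop that consumes it (the real loop lives elsewhere in parse.go; the shape here is illustrative only):

    // Illustrative only: run insertion modes until the current token is consumed.
    for {
        im, consumed := p.im(p)
        p.im = im
        if consumed {
            break // done with this token; tokenize the next one
        }
        // Not consumed: loop again and reprocess the same token in the new mode.
    }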

View File

@@ -85,6 +85,8 @@ func dumpLevel(w io.Writer, n *Node, level int) os.Error {
 		fmt.Fprintf(w, "%q", EscapeString(n.Data))
 	case CommentNode:
 		return os.NewError("COMMENT")
+	case DoctypeNode:
+		fmt.Fprintf(w, "<!DOCTYPE %s>", EscapeString(n.Data))
 	case scopeMarkerNode:
 		return os.NewError("unexpected scopeMarkerNode")
 	default:
@@ -121,7 +123,7 @@ func TestParser(t *testing.T) {
 	rc := make(chan io.Reader)
 	go readDat(filename, rc)
 	// TODO(nigeltao): Process all test cases, not just a subset.
-	for i := 0; i < 23; i++ {
+	for i := 0; i < 25; i++ {
 		// Parse the #data section.
 		b, err := ioutil.ReadAll(<-rc)
 		if err != nil {

View File

@@ -27,6 +27,8 @@ const (
 	SelfClosingTagToken
 	// A CommentToken looks like <!--x-->.
 	CommentToken
+	// A DoctypeToken looks like <!DOCTYPE x>
+	DoctypeToken
 )
 
 // String returns a string representation of the TokenType.
@@ -44,6 +46,8 @@ func (t TokenType) String() string {
 		return "SelfClosingTag"
 	case CommentToken:
 		return "Comment"
+	case DoctypeToken:
+		return "Doctype"
 	}
 	return "Invalid(" + strconv.Itoa(int(t)) + ")"
 }
@@ -56,9 +60,9 @@ type Attribute struct {
 }
 
 // A Token consists of a TokenType and some Data (tag name for start and end
-// tags, content for text and comments). A tag Token may also contain a slice
-// of Attributes. Data is unescaped for all Tokens (it looks like "a<b" rather
-// than "a&lt;b").
+// tags, content for text, comments and doctypes). A tag Token may also contain
+// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
+// rather than "a&lt;b").
 type Token struct {
 	Type TokenType
 	Data string
@@ -97,6 +101,8 @@ func (t Token) String() string {
 		return "<" + t.tagString() + "/>"
 	case CommentToken:
 		return "<!--" + EscapeString(t.Data) + "-->"
+	case DoctypeToken:
+		return "<!DOCTYPE " + EscapeString(t.Data) + ">"
 	}
 	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
 }
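
A doctype token therefore round-trips through String as its serialized form, e.g. (a fragment sketched against the patched package):

    t := Token{Type: DoctypeToken, Data: "html"}
    fmt.Println(t) // prints: <!DOCTYPE html>
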
@@ -109,9 +115,15 @@ type Tokenizer struct {
 	// r is the source of the HTML text.
 	r io.Reader
-	// tt is the TokenType of the most recently read token. If tt == Error
-	// then err is the error associated with trying to read that token.
+	// tt is the TokenType of the most recently read token.
 	tt TokenType
+	// err is the first error encountered during tokenization. It is possible
+	// for tt != Error && err != nil to hold: this means that Next returned a
+	// valid token but the subsequent Next call will return an error token.
+	// For example, if the HTML text input was just "plain", then the first
+	// Next call would set z.err to os.EOF but return a TextToken, and all
+	// subsequent Next calls would return an ErrorToken.
+	// err is never reset. Once it becomes non-nil, it stays non-nil.
 	err os.Error
 	// buf[p0:p1] holds the raw data of the most recent token.
 	// buf[p1:] is buffered input that will yield future tokens.
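
The "plain" example in the new comment plays out like this (a fragment sketch; TokenType values print via their String method):

    z := NewTokenizer(strings.NewReader("plain"))
    fmt.Println(z.Next()) // Text: "plain" is returned even though z.err is now os.EOF
    fmt.Println(z.Next()) // Error: the stored error is reported from here on
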
@@ -137,7 +149,9 @@ func (z *Tokenizer) Raw() []byte {
 // readByte returns the next byte from the input stream, doing a buffered read
 // from z.r into z.buf if necessary. z.buf[z.p0:z.p1] remains a contiguous byte
 // slice that holds all the bytes read so far for the current token.
-func (z *Tokenizer) readByte() (byte, os.Error) {
+// It sets z.err if the underlying reader returns an error.
+// Pre-condition: z.err == nil.
+func (z *Tokenizer) readByte() byte {
 	if z.p1 >= len(z.buf) {
 		// Our buffer is exhausted and we have to read from z.r.
 		// We copy z.buf[z.p0:z.p1] to the beginning of z.buf. If the length
@@ -149,82 +163,120 @@ func (z *Tokenizer) readByte() (byte, os.Error) {
 		if 2*d > c {
 			buf1 = make([]byte, d, 2*c)
 		} else {
-			buf1 = z.buf[0:d]
+			buf1 = z.buf[:d]
 		}
 		copy(buf1, z.buf[z.p0:z.p1])
-		z.p0, z.p1, z.buf = 0, d, buf1[0:d]
+		z.p0, z.p1, z.buf = 0, d, buf1[:d]
 		// Now that we have copied the live bytes to the start of the buffer,
 		// we read from z.r into the remainder.
 		n, err := z.r.Read(buf1[d:cap(buf1)])
 		if err != nil {
-			return 0, err
+			z.err = err
+			return 0
 		}
-		z.buf = buf1[0 : d+n]
+		z.buf = buf1[:d+n]
 	}
 	x := z.buf[z.p1]
 	z.p1++
-	return x, nil
+	return x
 }
 
-// readTo keeps reading bytes until x is found.
-func (z *Tokenizer) readTo(x uint8) os.Error {
+// readTo keeps reading bytes until x is found or a read error occurs. If an
+// error does occur, z.err is set to that error.
+// Pre-condition: z.err == nil.
+func (z *Tokenizer) readTo(x uint8) {
 	for {
-		c, err := z.readByte()
-		if err != nil {
-			return err
+		c := z.readByte()
+		if z.err != nil {
+			return
 		}
 		switch c {
 		case x:
-			return nil
+			return
 		case '\\':
-			_, err = z.readByte()
-			if err != nil {
-				return err
+			z.readByte()
+			if z.err != nil {
+				return
 			}
 		}
 	}
-	panic("unreachable")
 }
 
-// nextMarkupDeclaration returns the next TokenType starting with "<!".
-func (z *Tokenizer) nextMarkupDeclaration() (TokenType, os.Error) {
-	// TODO: check for <!DOCTYPE ... >, don't just assume that it's a comment.
-	for i := 0; i < 2; i++ {
-		c, err := z.readByte()
-		if err != nil {
-			return TextToken, err
-		}
-		if c != '-' {
-			return z.nextText(), nil
-		}
-	}
+// nextComment reads the next token starting with "<!--".
+// The opening "<!--" has already been consumed.
+// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 4 <= z.p1.
+func (z *Tokenizer) nextComment() {
 	// <!--> is a valid comment.
 	for dashCount := 2; ; {
-		c, err := z.readByte()
-		if err != nil {
-			return TextToken, err
+		c := z.readByte()
+		if z.err != nil {
+			return
 		}
 		switch c {
 		case '-':
 			dashCount++
 		case '>':
 			if dashCount >= 2 {
-				return CommentToken, nil
+				z.tt = CommentToken
+				return
 			}
-			fallthrough
+			dashCount = 0
 		default:
 			dashCount = 0
 		}
 	}
-	panic("unreachable")
 }
 
-// nextTag returns the next TokenType starting from the tag open state.
-func (z *Tokenizer) nextTag() (tt TokenType, err os.Error) {
-	c, err := z.readByte()
-	if err != nil {
-		return ErrorToken, err
+// nextMarkupDeclaration reads the next token starting with "<!".
+// It might be a "<!--comment-->", a "<!DOCTYPE foo>", or "<!malformed text".
+// The opening "<!" has already been consumed.
+// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 2 <= z.p1.
+func (z *Tokenizer) nextMarkupDeclaration() {
+	var c [2]byte
+	for i := 0; i < 2; i++ {
+		c[i] = z.readByte()
+		if z.err != nil {
+			return
+		}
 	}
+	if c[0] == '-' && c[1] == '-' {
+		z.nextComment()
+		return
+	}
+	z.p1 -= 2
+	const s = "DOCTYPE "
+	for i := 0; ; i++ {
+		c := z.readByte()
+		if z.err != nil {
+			return
+		}
+		// Capitalize c.
+		if 'a' <= c && c <= 'z' {
+			c = 'A' + (c - 'a')
+		}
+		if i < len(s) && c != s[i] {
+			z.nextText()
+			return
+		}
+		if c == '>' {
+			if i >= len(s) {
+				z.tt = DoctypeToken
+			}
+			return
+		}
+	}
+}
+
+// nextTag reads the next token starting with "<". It might be a "<startTag>",
+// an "</endTag>", a "<!markup declaration>", or "<malformed text".
+// The opening "<" has already been consumed.
+// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 1 <= z.p1.
+func (z *Tokenizer) nextTag() {
+	c := z.readByte()
+	if z.err != nil {
+		return
+	}
+	var tt TokenType
 	switch {
 	case c == '/':
 		tt = EndTagToken
@@ -232,56 +284,49 @@ func (z *Tokenizer) nextTag() (tt TokenType, err os.Error) {
 	case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
 		tt = StartTagToken
 	case c == '!':
-		return z.nextMarkupDeclaration()
+		z.nextMarkupDeclaration()
+		return
 	case c == '?':
-		return ErrorToken, os.NewError("html: TODO(nigeltao): implement XML processing instructions")
+		z.tt, z.err = ErrorToken, os.NewError("html: TODO: implement XML processing instructions")
+		return
 	default:
-		return ErrorToken, os.NewError("html: TODO(nigeltao): handle malformed tags")
+		z.tt, z.err = ErrorToken, os.NewError("html: TODO: handle malformed tags")
+		return
 	}
 	for {
-		c, err := z.readByte()
-		if err != nil {
-			return TextToken, err
+		c := z.readByte()
+		if z.err != nil {
+			return
 		}
 		switch c {
-		case '"':
-			err = z.readTo('"')
-			if err != nil {
-				return TextToken, err
-			}
-		case '\'':
-			err = z.readTo('\'')
-			if err != nil {
-				return TextToken, err
+		case '"', '\'':
+			z.readTo(c)
+			if z.err != nil {
+				return
 			}
 		case '>':
+			z.tt = tt
 			if z.buf[z.p1-2] == '/' && tt == StartTagToken {
-				return SelfClosingTagToken, nil
+				z.tt = SelfClosingTagToken
 			}
-			return tt, nil
+			return
 		}
 	}
-	panic("unreachable")
 }
 
 // nextText reads all text up until an '<'.
-func (z *Tokenizer) nextText() TokenType {
+// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 1 <= z.p1.
+func (z *Tokenizer) nextText() {
 	for {
-		c, err := z.readByte()
-		if err != nil {
-			z.tt, z.err = ErrorToken, err
-			if err == os.EOF {
-				z.tt = TextToken
-			}
-			return z.tt
+		c := z.readByte()
+		if z.err != nil {
+			return
 		}
 		if c == '<' {
 			z.p1--
-			z.tt = TextToken
-			return z.tt
+			return
 		}
 	}
-	panic("unreachable")
 }
 
 // Next scans the next token and returns its type.
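
One consequence of the byte-by-byte capitalization in nextMarkupDeclaration above is that doctype matching is ASCII case-insensitive, while any other "<!" content that is not a comment degrades to plain text. For example (a fragment sketch):

    z := NewTokenizer(strings.NewReader("<!doctype html>"))
    fmt.Println(z.Next())         // Doctype: lower-case "doctype" still matches
    fmt.Println(string(z.Text())) // html
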
@@ -292,19 +337,22 @@ func (z *Tokenizer) Next() TokenType {
 			return z.tt
 		}
 		z.p0 = z.p1
-		c, err := z.readByte()
-		if err != nil {
-			z.tt, z.err = ErrorToken, err
+		c := z.readByte()
+		if z.err != nil {
+			z.tt = ErrorToken
 			return z.tt
 		}
-		if c == '<' {
-			z.tt, z.err = z.nextTag()
+		// We assume that the next token is text unless proven otherwise.
+		z.tt = TextToken
+		if c != '<' {
+			z.nextText()
+		} else {
+			z.nextTag()
 			if z.tt == CommentToken && !z.ReturnComments {
 				continue
 			}
-			return z.tt
 		}
-		return z.nextText()
+		return z.tt
 	}
 	panic("unreachable")
 }
@@ -382,25 +430,29 @@ loop:
 	return z.buf[i0:i], z.trim(i)
 }
 
-// Text returns the unescaped text of a TextToken or a CommentToken.
-// The contents of the returned slice may change on the next call to Next.
+// Text returns the unescaped text of a text, comment or doctype token. The
+// contents of the returned slice may change on the next call to Next.
 func (z *Tokenizer) Text() []byte {
+	var i0, i1 int
 	switch z.tt {
 	case TextToken:
-		s := unescape(z.Raw())
-		z.p0 = z.p1
-		return s
+		i0 = z.p0
+		i1 = z.p1
 	case CommentToken:
-		// We trim the "<!--" from the left and the "-->" from the right.
+		// Trim the "<!--" from the left and the "-->" from the right.
 		// "<!-->" is a valid comment, so the adjusted endpoints might overlap.
-		i0 := z.p0 + 4
-		i1 := z.p1 - 3
-		z.p0 = z.p1
-		var s []byte
-		if i0 < i1 {
-			s = unescape(z.buf[i0:i1])
-		}
-		return s
+		i0 = z.p0 + 4
+		i1 = z.p1 - 3
+	case DoctypeToken:
+		// Trim the "<!DOCTYPE " from the left and the ">" from the right.
+		i0 = z.p0 + 10
+		i1 = z.p1 - 1
+	default:
+		return nil
+	}
+	z.p0 = z.p1
+	if i0 < i1 {
+		return unescape(z.buf[i0:i1])
 	}
 	return nil
 }
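
The shared tail also handles the overlap case the comment warns about: for the valid comment "<!-->", i0 = p0+4 crosses i1 = p1-3, so Text returns nil rather than slicing backwards. For example (a fragment sketch):

    z := NewTokenizer(strings.NewReader("<!-->"))
    z.ReturnComments = true
    z.Next()                     // Comment
    fmt.Println(z.Text() == nil) // true: the trimmed endpoints overlap
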
@@ -483,7 +535,7 @@ loop:
 func (z *Tokenizer) Token() Token {
 	t := Token{Type: z.tt}
 	switch z.tt {
-	case TextToken, CommentToken:
+	case TextToken, CommentToken, DoctypeToken:
 		t.Data = string(z.Text())
 	case StartTagToken, EndTagToken, SelfClosingTagToken:
 		var attr []Attribute