diff --git a/src/pkg/html/node.go b/src/pkg/html/node.go
index 595afd5690..4ecfd6ca23 100644
--- a/src/pkg/html/node.go
+++ b/src/pkg/html/node.go
@@ -13,6 +13,7 @@ const (
 	DocumentNode
 	ElementNode
 	CommentNode
+	DoctypeNode
 	scopeMarkerNode
 )
diff --git a/src/pkg/html/parse.go b/src/pkg/html/parse.go
index 980c470691..519ebe587b 100644
--- a/src/pkg/html/parse.go
+++ b/src/pkg/html/parse.go
@@ -81,8 +81,8 @@ func (p *parser) popUntil(stopTags []string, matchTags ...string) bool {
 	return false
 }
 
-// addChild adds a child node n to the top element, and pushes n if it is an
-// element node (text nodes are not part of the stack of open elements).
+// addChild adds a child node n to the top element, and pushes n onto the stack
+// of open elements if it is an element node.
 func (p *parser) addChild(n *Node) {
 	p.top().Add(n)
 	if n.Type == ElementNode {
@@ -90,10 +90,15 @@ func (p *parser) addChild(n *Node) {
 	}
 }
 
-// addText calls addChild with a text node.
+// addText adds text to the preceding node if it is a text node, or else it
+// calls addChild with a new text node.
 func (p *parser) addText(text string) {
-	// TODO: merge s with previous text, if the preceding node is a text node.
 	// TODO: distinguish whitespace text from others.
+	t := p.top()
+	if i := len(t.Child); i > 0 && t.Child[i-1].Type == TextNode {
+		t.Child[i-1].Data += text
+		return
+	}
 	p.addChild(&Node{
 		Type: TextNode,
 		Data: text,
@@ -201,7 +206,15 @@ func useTheRulesFor(p *parser, actual, delegate insertionMode) (insertionMode, b
 
 // Section 11.2.5.4.1.
 func initialIM(p *parser) (insertionMode, bool) {
-	// TODO: check p.tok for DOCTYPE.
+	if p.tok.Type == DoctypeToken {
+		p.addChild(&Node{
+			Type: DoctypeNode,
+			Data: p.tok.Data,
+		})
+		return beforeHTMLIM, true
+	}
+	// TODO: set "quirks mode"? It's defined in the DOM spec instead of HTML5 proper,
+	// and so switching on "quirks mode" might belong in a different package.
 	return beforeHTMLIM, false
 }
diff --git a/src/pkg/html/parse_test.go b/src/pkg/html/parse_test.go
index f22fa277bc..7d918d2508 100644
--- a/src/pkg/html/parse_test.go
+++ b/src/pkg/html/parse_test.go
@@ -85,6 +85,8 @@ func dumpLevel(w io.Writer, n *Node, level int) os.Error {
 		fmt.Fprintf(w, "%q", EscapeString(n.Data))
 	case CommentNode:
 		return os.NewError("COMMENT")
+	case DoctypeNode:
+		fmt.Fprintf(w, "<!DOCTYPE %s>", EscapeString(n.Data))
 	case scopeMarkerNode:
 		return os.NewError("unexpected scopeMarkerNode")
 	default:
@@ -121,7 +123,7 @@ func TestParser(t *testing.T) {
 	rc := make(chan io.Reader)
 	go readDat(filename, rc)
 	// TODO(nigeltao): Process all test cases, not just a subset.
-	for i := 0; i < 23; i++ {
+	for i := 0; i < 25; i++ {
 		// Parse the #data section.
 		b, err := ioutil.ReadAll(<-rc)
 		if err != nil {
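Note (illustration only, not part of the change): the parse.go hunks above have two observable effects. A leading DOCTYPE now becomes a DoctypeNode child of the Document, and consecutive text tokens coalesce into a single TextNode instead of becoming sibling text nodes. A sketch of both, assuming the pre-Go1 API this tree uses (html.Parse, the exported Node fields, os.Error's String method); the input string is arbitrary:

	package main

	import (
		"fmt"
		"html"
		"strings"
	)

	func main() {
		// "a" and "b" reach the parser as two separate text tokens, because
		// the comment between them is tokenized separately.
		doc, err := html.Parse(strings.NewReader("<!DOCTYPE html><p>a<!--x-->b</p>"))
		if err != nil {
			fmt.Println(err.String())
			return
		}
		// With this change, the Document's first child is a DoctypeNode with
		// Data "html", and the <p> element holds a single TextNode "ab" where
		// the old addText produced two sibling text nodes.
		fmt.Println(doc.Child[0].Type == html.DoctypeNode) // true
	}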
diff --git a/src/pkg/html/token.go b/src/pkg/html/token.go
index 5c6ed16662..d280ff2256 100644
--- a/src/pkg/html/token.go
+++ b/src/pkg/html/token.go
@@ -27,6 +27,8 @@ const (
 	SelfClosingTagToken
 	// A CommentToken looks like <!--x-->.
 	CommentToken
+	// A DoctypeToken looks like <!DOCTYPE x>
+	DoctypeToken
 )
 
 // String returns a string representation of the TokenType.
@@ -44,6 +46,8 @@ func (t TokenType) String() string {
 		return "SelfClosingTag"
 	case CommentToken:
 		return "Comment"
+	case DoctypeToken:
+		return "Doctype"
 	}
 	return "Invalid(" + strconv.Itoa(int(t)) + ")"
 }
@@ -56,9 +60,9 @@ type Attribute struct {
 }
 
 // A Token consists of a TokenType and some Data (tag name for start and end
-// tags, content for text and comments). A tag Token may also contain a slice
-// of Attributes. Data is unescaped for all Tokens (it looks like "a<b" rather
-// than "a&lt;b").
+// tags, content for text, comments and doctypes). A tag Token may also contain
+// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
+// rather than "a&lt;b").
 type Token struct {
 	Type TokenType
 	Data string
@@ -91,6 +95,8 @@ func (t Token) String() string {
 		return "<" + t.tagString() + "/>"
 	case CommentToken:
 		return "<!--" + t.Data + "-->"
+	case DoctypeToken:
+		return "<!DOCTYPE " + t.Data + ">"
 	}
 	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
 }
@@ -109,9 +115,15 @@ type Tokenizer struct {
 	// r is the source of the HTML text.
 	r io.Reader
-	// tt is the TokenType of the most recently read token. If tt == Error
-	// then err is the error associated with trying to read that token.
-	tt TokenType
+	// tt is the TokenType of the most recently read token.
+	tt TokenType
+	// err is the first error encountered during tokenization. It is possible
+	// for tt != Error && err != nil to hold: this means that Next returned a
+	// valid token but the subsequent Next call will return an error token.
+	// For example, if the HTML text input was just "plain", then the first
+	// Next call would set z.err to os.EOF but return a TextToken, and all
+	// subsequent Next calls would return an ErrorToken.
+	// err is never reset. Once it becomes non-nil, it stays non-nil.
 	err os.Error
 	// buf[p0:p1] holds the raw data of the most recent token.
 	// buf[p1:] is buffered input that will yield future tokens.
 	buf []byte
@@ -137,7 +149,9 @@ func (z *Tokenizer) Raw() []byte {
 // readByte returns the next byte from the input stream, doing a buffered read
 // from z.r into z.buf if necessary. z.buf[z.p0:z.p1] remains a contiguous byte
 // slice that holds all the bytes read so far for the current token.
-func (z *Tokenizer) readByte() (byte, os.Error) {
+// It sets z.err if the underlying reader returns an error.
+// Pre-condition: z.err == nil.
+func (z *Tokenizer) readByte() byte {
 	if z.p1 >= len(z.buf) {
 		// Our buffer is exhausted and we have to read from z.r.
 		// We copy z.buf[z.p0:z.p1] to the beginning of z.buf. If the length
@@ -149,82 +163,120 @@ func (z *Tokenizer) readByte() (byte, os.Error) {
 		if 2*d > c {
 			buf1 = make([]byte, d, 2*c)
 		} else {
-			buf1 = z.buf[0:d]
+			buf1 = z.buf[:d]
 		}
 		copy(buf1, z.buf[z.p0:z.p1])
-		z.p0, z.p1, z.buf = 0, d, buf1[0:d]
+		z.p0, z.p1, z.buf = 0, d, buf1[:d]
 		// Now that we have copied the live bytes to the start of the buffer,
 		// we read from z.r into the remainder.
 		n, err := z.r.Read(buf1[d:cap(buf1)])
 		if err != nil {
-			return 0, err
+			z.err = err
+			return 0
 		}
-		z.buf = buf1[0 : d+n]
+		z.buf = buf1[:d+n]
 	}
 	x := z.buf[z.p1]
 	z.p1++
-	return x, nil
+	return x
 }
 
-// readTo keeps reading bytes until x is found.
-func (z *Tokenizer) readTo(x uint8) os.Error {
+// readTo keeps reading bytes until x is found or a read error occurs. If an
+// error does occur, z.err is set to that error.
+// Pre-condition: z.err == nil.
+func (z *Tokenizer) readTo(x uint8) {
 	for {
-		c, err := z.readByte()
-		if err != nil {
-			return err
+		c := z.readByte()
+		if z.err != nil {
+			return
 		}
 		switch c {
 		case x:
-			return nil
+			return
 		case '\\':
-			_, err = z.readByte()
-			if err != nil {
-				return err
+			z.readByte()
+			if z.err != nil {
+				return
 			}
 		}
 	}
-	panic("unreachable")
 }
 
-// nextMarkupDeclaration returns the next TokenType starting with "<!".
-func (z *Tokenizer) nextMarkupDeclaration() (TokenType, os.Error) {
-	// TODO: check for <!DOCTYPE ... >, don't just assume that it's a comment.
-	for i := 0; i < 2; i++ {
-		c, err := z.readByte()
-		if err != nil {
-			return TextToken, err
-		}
-		if c != '-' {
-			return z.nextText(), nil
-		}
-	}
+// nextComment reads the next token starting with "<!--". The opening "<!--"
+// has already been consumed.
+// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 4 <= z.p1.
+func (z *Tokenizer) nextComment() {
+	// "<!-->" is a valid comment.
 	for dashCount := 2; ; {
-		c, err := z.readByte()
-		if err != nil {
-			return TextToken, err
+		c := z.readByte()
+		if z.err != nil {
+			return
 		}
 		switch c {
 		case '-':
 			dashCount++
 		case '>':
 			if dashCount >= 2 {
-				return CommentToken, nil
+				z.tt = CommentToken
+				return
 			}
-			fallthrough
+			dashCount = 0
 		default:
 			dashCount = 0
 		}
 	}
-	panic("unreachable")
 }
 
-// nextTag returns the next TokenType starting from the tag open state.
-func (z *Tokenizer) nextTag() (tt TokenType, err os.Error) {
-	c, err := z.readByte()
-	if err != nil {
-		return ErrorToken, err
+// nextMarkupDeclaration reads the next token starting with "<!". It might be
+// a "<!--comment-->", a "<!DOCTYPE foo>", or "<!malformed text". The opening
+// "<!" has already been consumed.
+// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 2 <= z.p1.
+func (z *Tokenizer) nextMarkupDeclaration() {
+	var c [2]byte
+	for i := 0; i < 2; i++ {
+		c[i] = z.readByte()
+		if z.err != nil {
+			return
+		}
+	}
+	if c[0] == '-' && c[1] == '-' {
+		z.nextComment()
+		return
+	}
+	z.p1 -= 2
+	const s = "DOCTYPE "
+	for i := 0; ; i++ {
+		c := z.readByte()
+		if z.err != nil {
+			return
+		}
+		// Capitalize c.
+		if 'a' <= c && c <= 'z' {
+			c = 'A' + (c - 'a')
+		}
+		if i < len(s) && c != s[i] {
+			z.nextText()
+			return
+		}
+		if c == '>' {
+			if i >= len(s) {
+				z.tt = DoctypeToken
+			}
+			return
+		}
+	}
+}
+
+// nextTag reads the next token starting with "<". It might be a "<startTag>",
+// an "</endTag>", a "<!markup declaration>", or "<malformed text".
+// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 1 <= z.p1.
+func (z *Tokenizer) nextTag() {
+	c := z.readByte()
+	if z.err != nil {
+		return
 	}
+	var tt TokenType
 	switch {
 	case c == '/':
 		tt = EndTagToken
 	// Lower-cased characters are more common in tag names, so we check for them first.
 	case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
 		tt = StartTagToken
 	case c == '!':
-		return z.nextMarkupDeclaration()
+		z.nextMarkupDeclaration()
+		return
 	case c == '?':
-		return ErrorToken, os.NewError("html: TODO(nigeltao): implement XML processing instructions")
+		z.tt, z.err = ErrorToken, os.NewError("html: TODO(nigeltao): implement XML processing instructions")
+		return
 	default:
-		return ErrorToken, os.NewError("html: TODO(nigeltao): handle malformed tags")
+		z.tt, z.err = ErrorToken, os.NewError("html: TODO(nigeltao): handle malformed tags")
+		return
 	}
 	for {
-		c, err := z.readByte()
-		if err != nil {
-			return TextToken, err
+		c := z.readByte()
+		if z.err != nil {
+			return
 		}
 		switch c {
 		case '"', '\'':
-			err = z.readTo(c)
-			if err != nil {
-				return TextToken, err
+			z.readTo(c)
+			if z.err != nil {
+				return
 			}
 		case '>':
+			z.tt = tt
 			if z.buf[z.p1-2] == '/' && tt == StartTagToken {
-				return SelfClosingTagToken, nil
+				z.tt = SelfClosingTagToken
 			}
-			return tt, nil
+			return
 		}
 	}
-	panic("unreachable")
 }
 
 // nextText reads all text up until an '<'.
-func (z *Tokenizer) nextText() TokenType {
+// Pre-condition: z.tt == TextToken && z.err == nil && z.p0 + 1 <= z.p1.
+func (z *Tokenizer) nextText() {
 	for {
-		c, err := z.readByte()
-		if err != nil {
-			z.tt, z.err = ErrorToken, err
-			if err == os.EOF {
-				z.tt = TextToken
-			}
-			return z.tt
+		c := z.readByte()
+		if z.err != nil {
+			return
 		}
 		if c == '<' {
 			z.p1--
-			z.tt = TextToken
-			return z.tt
+			return
 		}
 	}
-	panic("unreachable")
 }
 
 // Next scans the next token and returns its type.
@@ -292,19 +337,22 @@ func (z *Tokenizer) Next() TokenType {
 			return z.tt
 		}
 		z.p0 = z.p1
-		c, err := z.readByte()
-		if err != nil {
-			z.tt, z.err = ErrorToken, err
+		c := z.readByte()
+		if z.err != nil {
+			z.tt = ErrorToken
 			return z.tt
 		}
-		if c == '<' {
-			z.tt, z.err = z.nextTag()
+		// We assume that the next token is text unless proven otherwise.
+		z.tt = TextToken
+		if c != '<' {
+			z.nextText()
+		} else {
+			z.nextTag()
 			if z.tt == CommentToken && !z.ReturnComments {
 				continue
 			}
-			return z.tt
 		}
-		return z.nextText()
+		return z.tt
 	}
 	panic("unreachable")
 }
@@ -382,25 +430,29 @@ loop:
 	return z.buf[i0:i], z.trim(i)
 }
 
-// Text returns the unescaped text of a TextToken or a CommentToken.
-// The contents of the returned slice may change on the next call to Next.
+// Text returns the unescaped text of a text, comment or doctype token. The
+// contents of the returned slice may change on the next call to Next.
 func (z *Tokenizer) Text() []byte {
+	var i0, i1 int
 	switch z.tt {
 	case TextToken:
-		s := unescape(z.Raw())
-		z.p0 = z.p1
-		return s
+		i0 = z.p0
+		i1 = z.p1
 	case CommentToken:
-		// We trim the "<!--" from the left and the "-->" from the right.
+		// Trim the "<!--" from the left and the "-->" from the right.
 		// "<!-->" is a valid comment, so the adjusted endpoints might overlap.
-		i0 := z.p0 + 4
-		i1 := z.p1 - 3
-		z.p0 = z.p1
-		var s []byte
-		if i0 < i1 {
-			s = unescape(z.buf[i0:i1])
-		}
-		return s
+		i0 = z.p0 + 4
+		i1 = z.p1 - 3
+	case DoctypeToken:
+		// Trim the "<!DOCTYPE " from the left and the ">" from the right.
+		i0 = z.p0 + 10
+		i1 = z.p1 - 1
+	default:
+		return nil
+	}
+	z.p0 = z.p1
+	if i0 < i1 {
+		return unescape(z.buf[i0:i1])
 	}
 	return nil
 }
@@ -483,7 +535,7 @@ loop:
 func (z *Tokenizer) Token() Token {
 	t := Token{Type: z.tt}
 	switch z.tt {
-	case TextToken, CommentToken:
+	case TextToken, CommentToken, DoctypeToken:
 		t.Data = string(z.Text())
 	case StartTagToken, EndTagToken, SelfClosingTagToken:
 		var attr []Attribute
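Note (illustration only, not part of the change): the token.go hunks give the tokenizer a sticky error and a new DoctypeToken type, so the "plain" example from the new doc comment on err can be seen end to end. A sketch assuming the same pre-Go1 API (html.NewTokenizer, Next, Text):

	package main

	import (
		"fmt"
		"html"
		"strings"
	)

	func main() {
		z := html.NewTokenizer(strings.NewReader("<!DOCTYPE html>plain"))
		// Text trims the "<!DOCTYPE " prefix and the ">" suffix, leaving "html".
		fmt.Println(z.Next(), string(z.Text())) // Doctype html
		// Reading "plain" hits os.EOF, but Next still returns the text token;
		// the error is recorded internally and is never reset.
		fmt.Println(z.Next(), string(z.Text())) // Text plain
		// Every subsequent call reports the sticky error.
		fmt.Println(z.Next()) // Error
	}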