From 7fde0d2b507b989cb9a23d6dbae9acaa13328c53 Mon Sep 17 00:00:00 2001
From: Russ Cox
Date: Fri, 11 Dec 2020 12:55:14 -0500
Subject: [PATCH] [dev.regabi] cmd/compile: remove use of Initorder, Offset
 Node fields for initorder

The initorder pass is already making heavy use of maps, and it is
concerned with relatively few nodes (only the assignments in
package-level variable declarations). The tracking of init order
for these nodes can be done with another map instead of storing the
bits directly in the Node representations. This will let us drop
Offset_ from AssignStmt and AssignListStmt and drop Initorder from
all nodes.

Passes buildall w/ toolstash -cmp.

Change-Id: I151c64e84670292c2004da4e8e3d0660a88e3df3
Reviewed-on: https://go-review.googlesource.com/c/go/+/277917
Trust: Russ Cox
Reviewed-by: Matthew Dempsky
---
 src/cmd/compile/internal/gc/initorder.go | 43 ++++++++++-----------
 src/cmd/compile/internal/ir/mini.go      | 12 +-----
 src/cmd/compile/internal/ir/node.go      |  2 -
 src/cmd/compile/internal/ir/stmt.go      | 48 ++++++++++--------------
 4 files changed, 44 insertions(+), 61 deletions(-)
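Note (placed here between the diffstat and the diff, where tools ignore
free text; not part of the change): the heart of this patch is replacing
two per-node fields (Initorder, Offset_) with one side table,
InitOrder.order, which maps each assignment to its count of
not-yet-initialized dependencies and uses orderDone = -1000 as the
"fully initialized" sentinel. The program below is a minimal,
self-contained sketch of that scheme; the names (node, initOrder, deps)
and the FIFO ready list are illustrative only. The real pass schedules
ready assignments with a heap keyed on declaration order and works on
ir.Node values.

package main

import "fmt"

const orderDone = -1000 // same sentinel value the patch uses

type node string

// initOrder returns decls sorted so that every node follows its
// dependencies, mirroring the order/blocking/ready bookkeeping in
// initorder.go.
func initOrder(deps map[node][]node, decls []node) []node {
	order := make(map[node]int)       // node -> outstanding dep count
	blocking := make(map[node][]node) // dep -> nodes waiting on it

	var ready []node
	for _, n := range decls {
		order[n] = 0
		for _, d := range deps[n] {
			if order[d] == orderDone { // already initialized
				continue
			}
			order[n]++
			blocking[d] = append(blocking[d], n)
		}
		if order[n] == 0 {
			ready = append(ready, n)
		}
	}

	var out []node
	for len(ready) > 0 {
		n := ready[0]
		ready = ready[1:]
		out = append(out, n)
		order[n] = orderDone // mark done, exactly as flushReady does
		for _, m := range blocking[n] {
			if order[m]--; order[m] == 0 {
				ready = append(ready, m)
			}
		}
		delete(blocking, n)
	}
	return out
}

func main() {
	// b depends on a; c depends on a and b.
	deps := map[node][]node{"b": {"a"}, "c": {"a", "b"}}
	fmt.Println(initOrder(deps, []node{"c", "b", "a"})) // [a b c]
}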
diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go
index 7f1f3cba926..d39e8189d7a 100644
--- a/src/cmd/compile/internal/gc/initorder.go
+++ b/src/cmd/compile/internal/gc/initorder.go
@@ -11,7 +11,6 @@ import (
 
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
-	"cmd/compile/internal/types"
 )
 
 // Package initialization
@@ -69,6 +68,8 @@ type InitOrder struct {
 	// ready is the queue of Pending initialization assignments
 	// that are ready for initialization.
 	ready declOrder
+
+	order map[ir.Node]int
 }
 
 // initOrder computes initialization order for a list l of
@@ -82,6 +83,7 @@ func initOrder(l []ir.Node) []ir.Node {
 	}
 	o := InitOrder{
 		blocking: make(map[ir.Node][]ir.Node),
+		order:    make(map[ir.Node]int),
 	}
 
 	// Process all package-level assignment in declaration order.
@@ -102,7 +104,7 @@ func initOrder(l []ir.Node) []ir.Node {
 	for _, n := range l {
 		switch n.Op() {
 		case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
-			if n.Initorder() != InitDone {
+			if o.order[n] != orderDone {
 				// If there have already been errors
 				// printed, those errors may have
 				// confused us and there might not be
@@ -110,7 +112,7 @@ func initOrder(l []ir.Node) []ir.Node {
 				// first.
 				base.ExitIfErrors()
 
-				findInitLoopAndExit(firstLHS(n), new([]*ir.Name))
+				o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name))
 				base.Fatalf("initialization unfinished, but failed to identify loop")
 			}
 		}
@@ -126,12 +128,10 @@ func initOrder(l []ir.Node) []ir.Node {
 }
 
 func (o *InitOrder) processAssign(n ir.Node) {
-	if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH {
-		base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
+	if _, ok := o.order[n]; ok {
+		base.Fatalf("unexpected state: %v, %v", n, o.order[n])
 	}
-
-	n.SetInitorder(InitPending)
-	n.SetOffset(0)
+	o.order[n] = 0
 
 	// Compute number of variable dependencies and build the
 	// inverse dependency ("blocking") graph.
@@ -139,38 +139,38 @@ func (o *InitOrder) processAssign(n ir.Node) {
 		defn := dep.Defn
 		// Skip dependencies on functions (PFUNC) and
 		// variables already initialized (InitDone).
-		if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone {
+		if dep.Class() != ir.PEXTERN || o.order[defn] == orderDone {
 			continue
 		}
-		n.SetOffset(n.Offset() + 1)
+		o.order[n]++
 		o.blocking[defn] = append(o.blocking[defn], n)
 	}
 
-	if n.Offset() == 0 {
+	if o.order[n] == 0 {
 		heap.Push(&o.ready, n)
 	}
 }
 
+const orderDone = -1000
+
 // flushReady repeatedly applies initialize to the earliest (in
 // declaration order) assignment ready for initialization and updates
 // the inverse dependency ("blocking") graph.
 func (o *InitOrder) flushReady(initialize func(ir.Node)) {
 	for o.ready.Len() != 0 {
 		n := heap.Pop(&o.ready).(ir.Node)
-		if n.Initorder() != InitPending || n.Offset() != 0 {
-			base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
+		if order, ok := o.order[n]; !ok || order != 0 {
+			base.Fatalf("unexpected state: %v, %v, %v", n, ok, order)
 		}
 
 		initialize(n)
-		n.SetInitorder(InitDone)
-		n.SetOffset(types.BADWIDTH)
+		o.order[n] = orderDone
 
 		blocked := o.blocking[n]
 		delete(o.blocking, n)
 
 		for _, m := range blocked {
-			m.SetOffset(m.Offset() - 1)
-			if m.Offset() == 0 {
+			if o.order[m]--; o.order[m] == 0 {
 				heap.Push(&o.ready, m)
 			}
 		}
@@ -183,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(ir.Node)) {
 // path points to a slice used for tracking the sequence of
 // variables/functions visited. Using a pointer to a slice allows the
 // slice capacity to grow and limit reallocations.
-func findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
+func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
 	// We implement a simple DFS loop-finding algorithm. This
 	// could be faster, but initialization cycles are rare.
 
@@ -203,11 +203,11 @@ func findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
 	*path = append(*path, n)
 	for _, ref := range refers {
 		// Short-circuit variables that were initialized.
-		if ref.Class() == ir.PEXTERN && ref.Defn.Initorder() == InitDone {
+		if ref.Class() == ir.PEXTERN && o.order[ref.Defn] == orderDone {
 			continue
 		}
 
-		findInitLoopAndExit(ref, path)
+		o.findInitLoopAndExit(ref, path)
 	}
 	*path = (*path)[:len(*path)-1]
 }
@@ -282,9 +282,10 @@ func (d *initDeps) visit(n ir.Node) bool {
 		return false
 
 	case ir.ONAME:
+		n := n.(*ir.Name)
 		switch n.Class() {
 		case ir.PEXTERN, ir.PFUNC:
-			d.foundDep(n.(*ir.Name))
+			d.foundDep(n)
 		}
 
 	case ir.OCLOSURE:
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
index d1d2e266edc..7a945c36902 100644
--- a/src/cmd/compile/internal/ir/mini.go
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -61,14 +61,12 @@ func (n *miniNode) SetEsc(x uint16) { n.esc = x }
 const (
 	miniWalkdefShift   = 0
 	miniTypecheckShift = 2
-	miniInitorderShift = 4
-	miniDiag           = 1 << 6
-	miniHasCall        = 1 << 7 // for miniStmt
+	miniDiag           = 1 << 4
+	miniHasCall        = 1 << 5 // for miniStmt
 )
 
 func (n *miniNode) Walkdef() uint8   { return n.bits.get2(miniWalkdefShift) }
 func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
-func (n *miniNode) Initorder() uint8 { return n.bits.get2(miniInitorderShift) }
 func (n *miniNode) SetWalkdef(x uint8) {
 	if x > 3 {
 		panic(fmt.Sprintf("cannot SetWalkdef %d", x))
@@ -81,12 +79,6 @@ func (n *miniNode) SetTypecheck(x uint8) {
 	}
 	n.bits.set2(miniTypecheckShift, x)
 }
-func (n *miniNode) SetInitorder(x uint8) {
-	if x > 3 {
-		panic(fmt.Sprintf("cannot SetInitorder %d", x))
-	}
-	n.bits.set2(miniInitorderShift, x)
-}
 func (n *miniNode) Diag() bool     { return n.bits&miniDiag != 0 }
 func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
 
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index ccf3671085b..0e73731070f 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -102,8 +102,6 @@ type Node interface {
 	SetBounded(x bool)
 	Typecheck() uint8
 	SetTypecheck(x uint8)
-	Initorder() uint8
-	SetInitorder(x uint8)
 	NonNil() bool
 	MarkNonNil()
 	HasCall() bool
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
index f41c50c92b1..b7d0c1adc45 100644
--- a/src/cmd/compile/internal/ir/stmt.go
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -63,10 +63,9 @@ func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) }
 // If Def is true, the assignment is a :=.
 type AssignListStmt struct {
 	miniStmt
-	Lhs     Nodes
-	Def     bool
-	Rhs     Nodes
-	Offset_ int64 // for initorder
+	Lhs Nodes
+	Def bool
+	Rhs Nodes
 }
 
 func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
@@ -75,20 +74,17 @@ func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
 	n.SetOp(op)
 	n.Lhs.Set(lhs)
 	n.Rhs.Set(rhs)
-	n.Offset_ = types.BADWIDTH
 	return n
 }
 
-func (n *AssignListStmt) List() Nodes       { return n.Lhs }
-func (n *AssignListStmt) PtrList() *Nodes   { return &n.Lhs }
-func (n *AssignListStmt) SetList(x Nodes)   { n.Lhs = x }
-func (n *AssignListStmt) Rlist() Nodes      { return n.Rhs }
-func (n *AssignListStmt) PtrRlist() *Nodes  { return &n.Rhs }
-func (n *AssignListStmt) SetRlist(x Nodes)  { n.Rhs = x }
-func (n *AssignListStmt) Colas() bool       { return n.Def }
-func (n *AssignListStmt) SetColas(x bool)   { n.Def = x }
-func (n *AssignListStmt) Offset() int64     { return n.Offset_ }
-func (n *AssignListStmt) SetOffset(x int64) { n.Offset_ = x }
+func (n *AssignListStmt) List() Nodes      { return n.Lhs }
+func (n *AssignListStmt) PtrList() *Nodes  { return &n.Lhs }
+func (n *AssignListStmt) SetList(x Nodes)  { n.Lhs = x }
+func (n *AssignListStmt) Rlist() Nodes     { return n.Rhs }
+func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs }
+func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x }
+func (n *AssignListStmt) Colas() bool      { return n.Def }
+func (n *AssignListStmt) SetColas(x bool)  { n.Def = x }
 
 func (n *AssignListStmt) SetOp(op Op) {
 	switch op {
@@ -103,28 +99,24 @@ func (n *AssignListStmt) SetOp(op Op) {
 // If Def is true, the assignment is a :=.
 type AssignStmt struct {
 	miniStmt
-	X       Node
-	Def     bool
-	Y       Node
-	Offset_ int64 // for initorder
+	X   Node
+	Def bool
+	Y   Node
 }
 
 func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
 	n := &AssignStmt{X: x, Y: y}
 	n.pos = pos
 	n.op = OAS
-	n.Offset_ = types.BADWIDTH
 	return n
 }
 
-func (n *AssignStmt) Left() Node        { return n.X }
-func (n *AssignStmt) SetLeft(x Node)    { n.X = x }
-func (n *AssignStmt) Right() Node       { return n.Y }
-func (n *AssignStmt) SetRight(y Node)   { n.Y = y }
-func (n *AssignStmt) Colas() bool       { return n.Def }
-func (n *AssignStmt) SetColas(x bool)   { n.Def = x }
-func (n *AssignStmt) Offset() int64     { return n.Offset_ }
-func (n *AssignStmt) SetOffset(x int64) { n.Offset_ = x }
+func (n *AssignStmt) Left() Node      { return n.X }
+func (n *AssignStmt) SetLeft(x Node)  { n.X = x }
+func (n *AssignStmt) Right() Node     { return n.Y }
+func (n *AssignStmt) SetRight(y Node) { n.Y = y }
+func (n *AssignStmt) Colas() bool     { return n.Def }
+func (n *AssignStmt) SetColas(x bool) { n.Def = x }
 
 func (n *AssignStmt) SetOp(op Op) {
 	switch op {
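
Note (after the patch, not part of the change): findInitLoopAndExit
becomes a method only so it can consult o.order when deciding which
references to skip; the loop search itself is the straightforward DFS
its comment describes. Below is a self-contained sketch of that DFS
with illustrative names (findLoop, refs, done); the real function
reports the cycle and exits the compiler rather than printing.

package main

import "fmt"

// findLoop sketches the DFS in findInitLoopAndExit: the current chain
// lives in a slice passed by pointer so its backing array is reused
// across recursive calls.
func findLoop(n string, path *[]string, refs map[string][]string, done map[string]bool) {
	// If n is already on the chain, the chain from that point is a cycle.
	for i, x := range *path {
		if x == n {
			cycle := append([]string{}, (*path)[i:]...)
			fmt.Println("cycle:", append(cycle, n))
			return
		}
	}

	*path = append(*path, n)
	for _, ref := range refs[n] {
		if done[ref] { // short-circuit already-initialized variables
			continue
		}
		findLoop(ref, path, refs, done)
	}
	*path = (*path)[:len(*path)-1]
}

func main() {
	// a refers to b, b to c, and c back to a: an initialization loop.
	refs := map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"a"}}
	findLoop("a", new([]string), refs, map[string]bool{})
	// Output: cycle: [a b c a]
}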
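
Note (not part of the change): the mini.go hunks work because Walkdef,
Typecheck, and (formerly) Initorder are 2-bit fields packed into a
single 8-bit set, accessed with the get2/set2 helpers visible in the
diff. Dropping Initorder frees bits 4 and 5, which is why the miniDiag
and miniHasCall flags can move down from 1<<6 and 1<<7 to 1<<4 and
1<<5. A standalone sketch of this packing follows; the bitset8 type
here is a stand-in written for this note, not the ir package's actual
implementation.

package main

import "fmt"

// bitset8 is a stand-in for the ir package's 8-bit flag set.
type bitset8 uint8

// get2 returns the 2-bit field stored at bit positions shift and shift+1.
func (f bitset8) get2(shift uint8) uint8 {
	return uint8(f>>shift) & 3
}

// set2 stores the 2-bit value x at bit positions shift and shift+1.
func (f *bitset8) set2(shift uint8, x uint8) {
	*f = *f&^(3<<shift) | bitset8(x)<<shift
}

const (
	walkdefShift   = 0      // bits 0-1, a 2-bit field
	typecheckShift = 2      // bits 2-3, a 2-bit field
	diag           = 1 << 4 // single-bit flag (1 << 6 before the patch)
	hasCall        = 1 << 5 // single-bit flag (1 << 7 before the patch)
)

func main() {
	var bits bitset8
	bits.set2(typecheckShift, 2) // e.g. "typechecking in progress"
	bits |= diag
	fmt.Println(bits.get2(typecheckShift), bits&diag != 0, bits&hasCall != 0)
	// Output: 2 true false
}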