mirror of https://github.com/golang/go, synced 2024-11-22 22:10:03 -07:00
cmd/compile: don't use OFORUNTIL when implementing range loops
We don't need this special loop construct anymore now that we do
conservative GC scanning of the top of stack. Rewrite instead to a
simple pointer increment on every iteration. This leads to having a
potential past-the-end pointer at the end of the last iteration, but
that value immediately goes dead after the loop condition fails, and
the past-the-end pointer is never live across any call. This
simplifies and speeds up loops.

R=go1.20

TODO: actually delete all support for OFORUNTIL. It is now never
generated, but code to handle it (e.g. in ssagen) is still around.

TODO: in "for _, x := range" loops, we could get rid of the index
altogether and use a "pointer to the last element" reference to
determine when the loop is complete.

Fixes #53409

Change-Id: Ifc141600ff898a8bc6a75f793e575f8862679ba1
Reviewed-on: https://go-review.googlesource.com/c/go/+/414876
Run-TryBot: Keith Randall <khr@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Cuong Manh Le <cuong.manhle.vn@gmail.com>
Reviewed-by: Heschi Kreinick <heschi@google.com>
Reviewed-by: Keith Randall <khr@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
parent 52016be3f4
commit 661146bc0b
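To make the new loop shape concrete before diving into the diff: below is a
rough user-level sketch of what the compiler now generates for a range loop
over a slice, an index that carries the loop bound plus a pointer bumped by
one element per iteration. The names hs, hv1, hp mirror walkRange's
hidden-variable convention; the function itself is illustrative, not
compiler output, and it breaks before the final advance because user code,
unlike the compiler, must not form a past-the-end pointer under the
unsafe.Pointer rules.

	package main

	import "unsafe"

	// sum walks b the way the rewritten range loop does: dereference the
	// current element pointer, then advance it by one element size.
	func sum(b []int) int {
		s := 0
		hs := b // hidden copy of the ranged-over slice
		if len(hs) == 0 {
			return s
		}
		hp := unsafe.Pointer(&hs[0]) // pointer to the current element
		for hv1 := 0; ; hv1++ {
			s += *(*int)(hp)
			if hv1+1 == len(hs) {
				break // stop before creating a past-the-end pointer
			}
			hp = unsafe.Add(hp, unsafe.Sizeof(hs[0]))
		}
		return s
	}

	func main() {
		println(sum([]int{1, 2, 3})) // prints 6
	}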
src/cmd/compile/internal/walk/range.go:

@@ -53,7 +53,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
 	// a, v1, v2: not hidden aggregate, val 1, 2
 
 	a := nrange.X
-	t := typecheck.RangeExprType(a.Type())
+	t := a.Type()
 	lno := ir.SetPos(a)
 
 	v1, v2 := nrange.Key, nrange.Value
@@ -70,20 +70,27 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
 		base.Fatalf("walkRange: v2 != nil while v1 == nil")
 	}
 
-	var ifGuard *ir.IfStmt
-
 	var body []ir.Node
 	var init []ir.Node
 	switch t.Kind() {
 	default:
 		base.Fatalf("walkRange")
 
-	case types.TARRAY, types.TSLICE:
+	case types.TARRAY, types.TSLICE, types.TPTR: // TPTR is pointer-to-array
 		if nn := arrayClear(nrange, v1, v2, a); nn != nil {
 			base.Pos = lno
 			return nn
 		}
 
+		// Element type of the iteration
+		var elem *types.Type
+		switch t.Kind() {
+		case types.TSLICE, types.TARRAY:
+			elem = t.Elem()
+		case types.TPTR:
+			elem = t.Elem().Elem()
+		}
+
 		// order.stmt arranged for a copy of the array/slice variable if needed.
 		ha := a
 
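For context, the new TPTR case is what lets this code handle Go's
range-over-pointer-to-array form directly: the element type sits two Elem()
calls away (pointer to array to element). A minimal source-level example of
the form this covers, with hypothetical values:

	package main

	func main() {
		p := &[3]int{10, 20, 30} // p's type is TPTR; Elem() is [3]int; Elem().Elem() is int
		for i, v := range p {
			println(i, v)
		}
	}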
@@ -108,7 +115,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
 		}
 
 		// for v1, v2 := range ha { body }
-		if cheapComputableIndex(t.Elem().Size()) {
+		if cheapComputableIndex(elem.Size()) {
 			// v1, v2 = hv1, ha[hv1]
 			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
 			tmp.SetBounded(true)
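For element types the backend can index cheaply, walkRange keeps this
indexing form instead of the pointer form introduced below; since hv1 is
already provably in range, tmp.SetBounded(true) records that ha[hv1] needs
no bounds check. A sketch of that retained loop shape (pseudocode, not
compiler output):

	// for hv1 := 0; hv1 < hn; hv1++ {
	//	v1, v2 = hv1, ha[hv1] // indexed load, bounds check elided
	// }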
@@ -128,25 +135,41 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
 		// TODO(austin): OFORUNTIL inhibits bounds-check
 		// elimination on the index variable (see #20711).
 		// Enhance the prove pass to understand this.
-		ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
-		ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
-		nfor.SetOp(ir.OFORUNTIL)
 
-		hp := typecheck.Temp(types.NewPtr(t.Elem()))
-		tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0))
-		tmp.SetBounded(true)
-		init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp)))
+		// Slice to iterate over
+		var hs ir.Node
+		if t.IsSlice() {
+			hs = ha
+		} else {
+			var arr ir.Node
+			if t.IsPtr() {
+				arr = ha
+			} else {
+				arr = typecheck.NodAddr(ha)
+				arr.SetType(t.PtrTo())
+				arr.SetTypecheck(1)
+			}
+			hs = ir.NewSliceExpr(base.Pos, ir.OSLICEARR, arr, nil, nil, nil)
+			// old typechecker doesn't know OSLICEARR, so we set types explicitly
+			hs.SetType(types.NewSlice(elem))
+			hs.SetTypecheck(1)
+		}
+
+		// Pointer to current iteration position
+		hp := typecheck.Temp(types.NewPtr(elem))
+		init = append(init, ir.NewAssignStmt(base.Pos, hp, ir.NewUnaryExpr(base.Pos, ir.OSPTR, hs)))
 
 		a := rangeAssign2(nrange, hv1, ir.NewStarExpr(base.Pos, hp))
 		body = append(body, a)
 
-		// Advance pointer as part of the late increment.
-		//
-		// This runs *after* the condition check, so we know
-		// advancing the pointer is safe and won't go past the
-		// end of the allocation.
-		as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Size()))
-		nfor.Late = []ir.Node{typecheck.Stmt(as)}
+		// Advance pointer for next iteration of the loop.
+		// Note: this pointer is now potentially a past-the-end pointer, so
+		// we need to make sure this pointer is never seen by the GC except
+		// during a conservative scan. Fortunately, the next thing we're going
+		// to do is check the loop bounds and exit, so it doesn't live very long
+		// (in particular, it doesn't live across any function call).
+		as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, elem.Size()))
+		nfor.Post = ir.NewBlockStmt(base.Pos, []ir.Node{nfor.Post, as})
 
 	case types.TMAP:
 		// order.stmt allocated the iterator for us.
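Taken together, this hunk replaces the OFORUNTIL machinery (the guard if
and the Late block) with an ordinary for loop whose Post block advances
both the index and the element pointer. Sketched as pseudocode, not
literal compiler output, the generated loop now looks roughly like:

	// init: hs := ha                  (arrays and *arrays are sliced first)
	//       hp := data pointer of hs  (OSPTR)
	// cond: hv1 < hn                  (hn = len(hs))
	// body: v1, v2 = hv1, *hp
	//       ... original loop body ...
	// post: hv1++
	//       hp = addptr(hp, elem.Size()) // may briefly be past-the-end, but
	//                                    // goes dead as soon as cond fails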
@@ -275,12 +298,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
 
 	typecheck.Stmts(init)
 
-	if ifGuard != nil {
-		ifGuard.PtrInit().Append(init...)
-		ifGuard = typecheck.Stmt(ifGuard).(*ir.IfStmt)
-	} else {
-		nfor.PtrInit().Append(init...)
-	}
+	nfor.PtrInit().Append(init...)
 
 	typecheck.Stmts(nfor.Cond.Init())
 
@@ -292,10 +310,6 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
 	nfor.Body.Append(nrange.Body...)
 
 	var n ir.Node = nfor
-	if ifGuard != nil {
-		ifGuard.Body = []ir.Node{n}
-		n = ifGuard
-	}
 
 	n = walkStmt(n)
 
test/prove.go:

@@ -735,8 +735,8 @@ func range1(b []int) {
 
 // range2 elements are larger, so they use the general form of a range loop.
 func range2(b [][32]int) {
-	for i, v := range b {
-		b[i][0] = v[0] + 1 // ERROR "Induction variable: limits \[0,\?\), increment 1$" "Proved IsInBounds$"
+	for i, v := range b { // ERROR "Induction variable: limits \[0,\?\), increment 1$"
+		b[i][0] = v[0] + 1 // ERROR "Proved IsInBounds$"
 		if i < len(b) { // ERROR "Proved Less64$"
 			println("x")
 		}
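The test delta records the effect on the prove pass: with a plain for loop
in place of OFORUNTIL, the induction-variable fact is now reported on the
for statement itself, while the IsInBounds and Less64 proofs are unchanged,
consistent with removing the OFORUNTIL inhibition that the TODO(austin)
comment and #20711 describe. To see the same diagnostics on ordinary code,
the debug flag from test/prove.go's errorcheck setup should work with a
standard toolchain: go tool compile -d=ssa/prove/debug=1 file.go (file.go
being whatever hypothetical file you want to inspect).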