
[dev.regabi] cmd/compile: split up walkexpr1, walkstmt [generated]

walkexpr1 is the second largest non-machine-generated function in the compiler,
weighing in at 1,164 lines. Since we are destroying the git blame history
anyway, now is a good time to split each case into its own function,
making future work on this code more manageable.
Do the same to walkstmt too for consistency, even though it is a paltry 259 lines.

[git-generate]
cd src/cmd/compile/internal/walk
rf '
	mv addstr walkAddString
	mv walkCall walkCall1
	mv walkpartialcall walkCallPart
	mv walkclosure walkClosure
	mv walkrange walkRange
	mv walkselect walkSelect
	mv walkselectcases walkSelectCases
	mv walkswitch walkSwitch
	mv walkExprSwitch walkSwitchExpr
	mv walkTypeSwitch walkSwitchType
	mv walkstmt walkStmt
	mv walkstmtlist walkStmtList
	mv walkexprlist walkExprList
	mv walkexprlistsafe walkExprListSafe
	mv walkexprlistcheap walkExprListCheap
	mv walkexpr walkExpr
	mv walkexpr1 walkExpr1
	mv walkprint walkPrint
	mv walkappend walkAppend
	mv walkcompare walkCompare
	mv walkcompareInterface walkCompareInterface
	mv walkcompareString walkCompareString

	mv appendslice appendSlice
	mv cheapexpr cheapExpr
	mv copyany walkCopy
	mv copyexpr copyExpr
	mv eqfor eqFor
	mv extendslice extendSlice
	mv finishcompare finishCompare
	mv safeexpr safeExpr

	mv walkStmt:/^\tcase ir.ORECV:/+2,/^\tcase /-2 walkRecv
	add walk.go:/^func walkRecv/-0 \
		// walkRecv walks an ORECV node.
	mv walkStmt:/^\tcase ir.ODCL:/+2,/^\tcase /-2 walkDecl
	add walk.go:/^func walkDecl/-0 \
		// walkDecl walks an ODCL node.
	mv walkStmt:/^\tcase ir.OGO:/+2,/^\tcase /-2 walkGoDefer
	add walk.go:/^func walkGoDefer/-0 \
		// walkGoDefer walks an OGO or ODEFER node.
	mv walkStmt:/^\tcase ir.OFOR,/+2,/^\tcase /-2 walkFor
	add walk.go:/^func walkFor/-0 \
		// walkFor walks an OFOR or OFORUNTIL node.
	mv walkStmt:/^\tcase ir.OIF:/+2,/^\tcase /-2 walkIf
	add walk.go:/^func walkIf/-0 \
		// walkIf walks an OIF node.
	mv walkStmt:/^\tcase ir.ORETURN:/+2,/^\tcase /-2 walkReturn
	add walk.go:/^func walkReturn/-0 \
		// walkReturn walks an ORETURN node.

	mv walkExpr1:/^\tcase ir.ODOT,/+2,/^\tcase /-2 walkDot
	add walk.go:/^func walkDot/-0 \
		// walkDot walks an ODOT or ODOTPTR node.
	mv walkExpr1:/^\tcase ir.ODOTTYPE,/+2,/^\tcase /-2 walkDotType
	add walk.go:/^func walkDotType/-0 \
		// walkDotType walks an ODOTTYPE or ODOTTYPE2 node.
	mv walkExpr1:/^\tcase ir.OLEN,/+2,/^\tcase /-2 walkLenCap
	add walk.go:/^func walkLenCap/-0 \
		// walkLenCap walks an OLEN or OCAP node.
	mv walkExpr1:/^\tcase ir.OANDAND,/+2,/^\tcase /-2 walkLogical
	add walk.go:/^func walkLogical/-0 \
		// walkLogical walks an OANDAND or OOROR node.
	mv walkExpr1:/^\tcase ir.OCALLINTER,/+2,/^\tcase /-2 walkCall
	add walk.go:/^func walkCall/-0 \
		// walkCall walks an OCALLFUNC, OCALLINTER, or OCALLMETH node.
	mv walkExpr1:/^\tcase ir.OAS,/+1,/^\tcase /-2 walkAssign
	add walk.go:/^func walkAssign/-0 \
		// walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node.
	mv walkExpr1:/^\tcase ir.OAS2:/+2,/^\tcase /-3 walkAssignList
	add walk.go:/^func walkAssignList/-0 \
		// walkAssignList walks an OAS2 node.
	mv walkExpr1:/^\tcase ir.OAS2FUNC:/+2,/^\tcase /-4 walkAssignFunc
	add walk.go:/^func walkAssignFunc/-0 \
		// walkAssignFunc walks an OAS2FUNC node.
	mv walkExpr1:/^\tcase ir.OAS2RECV:/+2,/^\tcase /-3 walkAssignRecv
	add walk.go:/^func walkAssignRecv/-0 \
		// walkAssignRecv walks an OAS2RECV node.
	mv walkExpr1:/^\tcase ir.OAS2MAPR:/+2,/^\tcase /-2 walkAssignMapRead
	add walk.go:/^func walkAssignMapRead/-0 \
		// walkAssignMapRead walks an OAS2MAPR node.
	mv walkExpr1:/^\tcase ir.ODELETE:/+2,/^\tcase /-2 walkDelete
	add walk.go:/^func walkDelete/-0 \
		// walkDelete walks an ODELETE node.
	mv walkExpr1:/^\tcase ir.OAS2DOTTYPE:/+2,/^\tcase /-2 walkAssignDotType
	add walk.go:/^func walkAssignDotType/-0 \
		// walkAssignDotType walks an OAS2DOTTYPE node.
	mv walkExpr1:/^\tcase ir.OCONVIFACE:/+2,/^\tcase /-2 walkConvInterface
	add walk.go:/^func walkConvInterface/-0 \
		// walkConvInterface walks an OCONVIFACE node.
	mv walkExpr1:/^\tcase ir.OCONV,/+2,/^\tcase /-2 walkConv
	add walk.go:/^func walkConv/-0 \
		// walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node.
	mv walkExpr1:/^\tcase ir.ODIV,/+2,/^\tcase /-2 walkDivMod
	add walk.go:/^func walkDivMod/-0 \
		// walkDivMod walks an ODIV or OMOD node.
	mv walkExpr1:/^\tcase ir.OINDEX:/+2,/^\tcase /-2 walkIndex
	add walk.go:/^func walkIndex/-0 \
		// walkIndex walks an OINDEX node.
	# move type assertion above comment
	mv walkExpr1:/^\tcase ir.OINDEXMAP:/+/n := n/-+ walkExpr1:/^\tcase ir.OINDEXMAP:/+0
	mv walkExpr1:/^\tcase ir.OINDEXMAP:/+2,/^\tcase /-2 walkIndexMap
	add walk.go:/^func walkIndexMap/-0 \
		// walkIndexMap walks an OINDEXMAP node.
	mv walkExpr1:/^\tcase ir.OSLICEHEADER:/+2,/^\tcase /-2 walkSliceHeader
	add walk.go:/^func walkSliceHeader/-0 \
		// walkSliceHeader walks an OSLICEHEADER node.
	mv walkExpr1:/^\tcase ir.OSLICE,/+2,/^\tcase /-2 walkSlice
	add walk.go:/^func walkSlice/-0 \
		// walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR node.
	mv walkExpr1:/^\tcase ir.ONEW:/+2,/^\tcase /-2 walkNew
	add walk.go:/^func walkNew/-0 \
		// walkNew walks an ONEW node.
	# move type assertion above comment
	mv walkExpr1:/^\tcase ir.OCLOSE:/+/n := n/-+ walkExpr1:/^\tcase ir.OCLOSE:/+0
	mv walkExpr1:/^\tcase ir.OCLOSE:/+2,/^\tcase /-2 walkClose
	add walk.go:/^func walkClose/-0 \
		// walkClose walks an OCLOSE node.
	# move type assertion above comment
	mv walkExpr1:/^\tcase ir.OMAKECHAN:/+/n := n/-+ walkExpr1:/^\tcase ir.OMAKECHAN:/+0
	mv walkExpr1:/^\tcase ir.OMAKECHAN:/+2,/^\tcase /-2 walkMakeChan
	add walk.go:/^func walkMakeChan/-0 \
		// walkMakeChan walks an OMAKECHAN node.
	mv walkExpr1:/^\tcase ir.OMAKEMAP:/+2,/^\tcase /-2 walkMakeMap
	add walk.go:/^func walkMakeMap/-0 \
		// walkMakeMap walks an OMAKEMAP node.
	mv walkExpr1:/^\tcase ir.OMAKESLICE:/+2,/^\tcase /-2 walkMakeSlice
	add walk.go:/^func walkMakeSlice/-0 \
		// walkMakeSlice walks an OMAKESLICE node.
	mv walkExpr1:/^\tcase ir.OMAKESLICECOPY:/+2,/^\tcase /-2 walkMakeSliceCopy
	add walk.go:/^func walkMakeSliceCopy/-0 \
		// walkMakeSliceCopy walks an OMAKESLICECOPY node.
	mv walkExpr1:/^\tcase ir.ORUNESTR:/+2,/^\tcase /-2 walkRuneToString
	add walk.go:/^func walkRuneToString/-0 \
		// walkRuneToString walks an ORUNESTR node.
	mv walkExpr1:/^\tcase ir.OBYTES2STR,/+2,/^\tcase /-2 walkBytesRunesToString
	add walk.go:/^func walkBytesRunesToString/-0 \
		// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
	mv walkExpr1:/^\tcase ir.OBYTES2STRTMP:/+2,/^\tcase /-2 walkBytesToStringTemp
	add walk.go:/^func walkBytesToStringTemp/-0 \
		// walkBytesToStringTemp walks an OBYTES2STRTMP node.
	mv walkExpr1:/^\tcase ir.OSTR2BYTES:/+2,/^\tcase /-2 walkStringToBytes
	add walk.go:/^func walkStringToBytes/-0 \
		// walkStringToBytes walks an OSTR2BYTES node.
	# move type assertion above comment
	mv walkExpr1:/^\tcase ir.OSTR2BYTESTMP:/+/n := n/-+ walkExpr1:/^\tcase ir.OSTR2BYTESTMP:/+0
	mv walkExpr1:/^\tcase ir.OSTR2BYTESTMP:/+2,/^\tcase /-2 walkStringToBytesTemp
	add walk.go:/^func walkStringToBytesTemp/-0 \
		// walkStringToBytesTemp walks an OSTR2BYTESTMP node.
	mv walkExpr1:/^\tcase ir.OSTR2RUNES:/+2,/^\tcase /-2 walkStringToRunes
	add walk.go:/^func walkStringToRunes/-0 \
		// walkStringToRunes walks an OSTR2RUNES node.
	mv walkExpr1:/^\tcase ir.OARRAYLIT,/+1,/^\tcase /-2 walkCompLit
	add walk.go:/^func walkCompLit/-0 \
		// walkCompLit walks a composite literal node: \
		// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr).
	mv walkExpr1:/^\tcase ir.OSEND:/+2,/^\tcase /-2 walkSend
	add walk.go:/^func walkSend/-0 \
		// walkSend walks an OSEND node.

	mv walkStmt walkStmtList \
		walkDecl \
		walkFor \
		walkGoDefer \
		walkIf \
		wrapCall \
		stmt.go

	mv walkExpr walkExpr1 walkExprList walkExprListCheap walkExprListSafe \
		cheapExpr safeExpr copyExpr \
		walkAddString \
		walkCall \
		walkCall1 \
		walkDivMod \
		walkDot \
		walkDotType \
		walkIndex \
		walkIndexMap \
		walkLogical \
		walkSend \
		walkSlice \
		walkSliceHeader \
		reduceSlice \
		bounded \
		usemethod \
		usefield \
		expr.go

	mv \
		walkAssign \
		walkAssignDotType \
		walkAssignFunc \
		walkAssignList \
		walkAssignMapRead \
		walkAssignRecv \
		walkReturn \
		fncall \
		ascompatee \
		ascompatee1 \
		ascompatet \
		reorder3 \
		reorder3save \
		aliased \
		anyAddrTaken \
		refersToName \
		refersToCommonName \
		appendSlice \
		isAppendOfMake \
		extendSlice \
		assign.go

	mv \
		walkCompare \
		walkCompareInterface \
		walkCompareString \
		finishCompare \
		eqFor \
		brcom \
		brrev \
		tracecmpArg \
		canMergeLoads \
		compare.go

	mv \
		walkConv \
		walkConvInterface \
		walkBytesRunesToString \
		walkBytesToStringTemp \
		walkRuneToString \
		walkStringToBytes \
		walkStringToBytesTemp \
		walkStringToRunes \
		convFuncName \
		rtconvfn \
		byteindex \
		walkCheckPtrAlignment \
		walkCheckPtrArithmetic \
		convert.go

	mv \
		walkAppend \
		walkClose \
		walkCopy \
		walkDelete \
		walkLenCap \
		walkMakeChan \
		walkMakeMap \
		walkMakeSlice \
		walkMakeSliceCopy \
		walkNew \
		walkPrint \
		badtype \
		callnew \
		writebarrierfn \
		isRuneCount \
		builtin.go

	mv \
		walkCompLit \
		sinit.go \
		complit.go

	mv subr.go walk.go
'

Change-Id: Ie0cf3ba4adf363c120c134d57cb7ef37934eaab9
Reviewed-on: https://go-review.googlesource.com/c/go/+/279430
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Russ Cox 2020-12-23 01:07:07 -05:00
parent e4895ab4c0
commit 3f04d964ab
13 changed files with 4191 additions and 3916 deletions
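As an aside for readers unfamiliar with rf: the addresses in the script above are ed-style ranges anchored on regexp matches, and mv with a line range extracts the addressed statements into a new function, leaving a call behind. For example, a command from the script such as

	mv walkStmt:/^\tcase ir.OIF:/+2,/^\tcase /-2 walkIf

selects the body of the OIF case arm in walkStmt (two lines past the case label through two lines before the next case) and moves it into a new function walkIf.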

src/cmd/compile/internal/walk/assign.go (new file)

@@ -0,0 +1,920 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package walk

import (
	"go/constant"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/reflectdata"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/internal/src"
)

// walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node.
func walkAssign(init *ir.Nodes, n ir.Node) ir.Node {
	init.Append(n.PtrInit().Take()...)

	var left, right ir.Node
	switch n.Op() {
	case ir.OAS:
		n := n.(*ir.AssignStmt)
		left, right = n.X, n.Y
	case ir.OASOP:
		n := n.(*ir.AssignOpStmt)
		left, right = n.X, n.Y
	}

	// Recognize m[k] = append(m[k], ...) so we can reuse
	// the mapassign call.
	var mapAppend *ir.CallExpr
	if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND {
		left := left.(*ir.IndexExpr)
		mapAppend = right.(*ir.CallExpr)
		if !ir.SameSafeExpr(left, mapAppend.Args[0]) {
			base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0])
		}
	}

	left = walkExpr(left, init)
	left = safeExpr(left, init)
	if mapAppend != nil {
		mapAppend.Args[0] = left
	}

	if n.Op() == ir.OASOP {
		// Rewrite x op= y into x = x op y.
		n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right)))
	} else {
		n.(*ir.AssignStmt).X = left
	}
	as := n.(*ir.AssignStmt)

	if oaslit(as, init) {
		return ir.NewBlockStmt(as.Pos(), nil)
	}

	if as.Y == nil {
		// TODO(austin): Check all "implicit zeroing"
		return as
	}

	if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) {
		return as
	}

	switch as.Y.Op() {
	default:
		as.Y = walkExpr(as.Y, init)

	case ir.ORECV:
		// x = <-c; as.Left is x, as.Right.Left is c.
		// order.stmt made sure x is addressable.
		recv := as.Y.(*ir.UnaryExpr)
		recv.X = walkExpr(recv.X, init)

		n1 := typecheck.NodAddr(as.X)
		r := recv.X // the channel
		return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)

	case ir.OAPPEND:
		// x = append(...)
		call := as.Y.(*ir.CallExpr)
		if call.Type().Elem().NotInHeap() {
			base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem())
		}
		var r ir.Node
		switch {
		case isAppendOfMake(call):
			// x = append(y, make([]T, y)...)
			r = extendSlice(call, init)
		case call.IsDDD:
			r = appendSlice(call, init) // also works for append(slice, string).
		default:
			r = walkAppend(call, init, as)
		}
		as.Y = r
		if r.Op() == ir.OAPPEND {
			// Left in place for back end.
			// Do not add a new write barrier.
			// Set up address of type for back end.
			r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem())
			return as
		}
		// Otherwise, lowered for race detector.
		// Treat as ordinary assignment.
	}

	if as.X != nil && as.Y != nil {
		return convas(as, init)
	}
	return as
}
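As an editorial aside, here is a minimal standalone program exercising the m[k] = append(m[k], ...) pattern that walkAssign recognizes above:

package main

import "fmt"

func main() {
	m := map[string][]int{}
	// Both index expressions are the same safe expression, so the
	// walker can reuse one mapassign call for load and store.
	m["a"] = append(m["a"], 1, 2)
	fmt.Println(m["a"]) // [1 2]
}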
// walkAssignDotType walks an OAS2DOTTYPE node.
func walkAssignDotType(n *ir.AssignListStmt, init *ir.Nodes) ir.Node {
	walkExprListSafe(n.Lhs, init)
	n.Rhs[0] = walkExpr(n.Rhs[0], init)
	return n
}

// walkAssignFunc walks an OAS2FUNC node.
func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
	init.Append(n.PtrInit().Take()...)

	r := n.Rhs[0]
	walkExprListSafe(n.Lhs, init)
	r = walkExpr(r, init)

	if ir.IsIntrinsicCall(r.(*ir.CallExpr)) {
		n.Rhs = []ir.Node{r}
		return n
	}
	init.Append(r)

	ll := ascompatet(n.Lhs, r.Type())
	return ir.NewBlockStmt(src.NoXPos, ll)
}

// walkAssignList walks an OAS2 node.
func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
	init.Append(n.PtrInit().Take()...)
	walkExprListSafe(n.Lhs, init)
	walkExprListSafe(n.Rhs, init)
	return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init))
}

// walkAssignMapRead walks an OAS2MAPR node.
func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
	init.Append(n.PtrInit().Take()...)

	r := n.Rhs[0].(*ir.IndexExpr)
	walkExprListSafe(n.Lhs, init)
	r.X = walkExpr(r.X, init)
	r.Index = walkExpr(r.Index, init)
	t := r.X.Type()

	fast := mapfast(t)
	var key ir.Node
	if fast != mapslow {
		// fast versions take key by value
		key = r.Index
	} else {
		// standard version takes key by reference
		// order.expr made sure key is addressable.
		key = typecheck.NodAddr(r.Index)
	}

	// from:
	//	a,b = m[i]
	// to:
	//	var,b = mapaccess2*(t, m, i)
	//	a = *var
	a := n.Lhs[0]

	var call *ir.CallExpr
	if w := t.Elem().Width; w <= zeroValSize {
		fn := mapfn(mapaccess2[fast], t)
		call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key)
	} else {
		fn := mapfn("mapaccess2_fat", t)
		z := reflectdata.ZeroAddr(w)
		call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z)
	}

	// mapaccess2* returns a typed bool, but due to spec changes,
	// the boolean result of i.(T) is now untyped so we make it the
	// same type as the variable on the lhs.
	if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() {
		call.Type().Field(1).Type = ok.Type()
	}
	n.Rhs = []ir.Node{call}
	n.SetOp(ir.OAS2FUNC)

	// don't generate a = *var if a is _
	if ir.IsBlank(a) {
		return walkExpr(typecheck.Stmt(n), init)
	}

	var_ := typecheck.Temp(types.NewPtr(t.Elem()))
	var_.SetTypecheck(1)
	var_.MarkNonNil() // mapaccess always returns a non-nil pointer

	n.Lhs[0] = var_
	init.Append(walkExpr(n, init))

	as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_))
	return walkExpr(typecheck.Stmt(as), init)
}
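For reference, the source-level form this lowers, a comma-ok map read, which becomes a mapaccess2* call (with a pointer dereference for large element types):

package main

import "fmt"

func main() {
	m := map[string]int{"x": 1}
	v, ok := m["x"]    // lowered to a mapaccess2* call
	fmt.Println(v, ok) // 1 true
	v, ok = m["y"]     // missing key: zero value, ok == false
	fmt.Println(v, ok) // 0 false
}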
// walkAssignRecv walks an OAS2RECV node.
func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
	init.Append(n.PtrInit().Take()...)

	r := n.Rhs[0].(*ir.UnaryExpr) // recv
	walkExprListSafe(n.Lhs, init)
	r.X = walkExpr(r.X, init)
	var n1 ir.Node
	if ir.IsBlank(n.Lhs[0]) {
		n1 = typecheck.NodNil()
	} else {
		n1 = typecheck.NodAddr(n.Lhs[0])
	}
	fn := chanfn("chanrecv2", 2, r.X.Type())
	ok := n.Lhs[1]
	call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1)
	return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call))
}
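The corresponding source pattern, a comma-ok receive lowered to chanrecv2, as a standalone sketch:

package main

import "fmt"

func main() {
	c := make(chan int, 1)
	c <- 42
	close(c)
	v, ok := <-c       // lowered to chanrecv2(c, &v)
	fmt.Println(v, ok) // 42 true
	v, ok = <-c        // closed and drained: zero value, ok == false
	fmt.Println(v, ok) // 0 false
}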
// walkReturn walks an ORETURN node.
func walkReturn(n *ir.ReturnStmt) ir.Node {
	ir.CurFunc.NumReturns++
	if len(n.Results) == 0 {
		return n
	}
	if (ir.HasNamedResults(ir.CurFunc) && len(n.Results) > 1) || paramoutheap(ir.CurFunc) {
		// assign to the function out parameters,
		// so that ascompatee can fix up conflicts
		var rl []ir.Node

		for _, ln := range ir.CurFunc.Dcl {
			cl := ln.Class_
			if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
				break
			}
			if cl == ir.PPARAMOUT {
				var ln ir.Node = ln
				if ir.IsParamStackCopy(ln) {
					ln = walkExpr(typecheck.Expr(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr)), nil)
				}
				rl = append(rl, ln)
			}
		}

		if got, want := len(n.Results), len(rl); got != want {
			// order should have rewritten multi-value function calls
			// with explicit OAS2FUNC nodes.
			base.Fatalf("expected %v return arguments, have %v", want, got)
		}

		// move function calls out, to make ascompatee's job easier.
		walkExprListSafe(n.Results, n.PtrInit())

		n.Results.Set(ascompatee(n.Op(), rl, n.Results, n.PtrInit()))
		return n
	}
	walkExprList(n.Results, n.PtrInit())

	// For each return parameter (lhs), assign the corresponding result (rhs).
	lhs := ir.CurFunc.Type().Results()
	rhs := n.Results
	res := make([]ir.Node, lhs.NumFields())
	for i, nl := range lhs.FieldSlice() {
		nname := ir.AsNode(nl.Nname)
		if ir.IsParamHeapCopy(nname) {
			nname = nname.Name().Stackcopy
		}
		a := ir.NewAssignStmt(base.Pos, nname, rhs[i])
		res[i] = convas(a, n.PtrInit())
	}
	n.Results.Set(res)
	return n
}

// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
func fncall(l ir.Node, rt *types.Type) bool {
	if l.HasCall() || l.Op() == ir.OINDEXMAP {
		return true
	}
	if types.Identical(l.Type(), rt) {
		return false
	}
	// There might be a conversion required, which might involve a runtime call.
	return true
}

func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
	// check assign expression list to
	// an expression list. called in
	//	expr-list = expr-list

	// ensure order of evaluation for function calls
	for i := range nl {
		nl[i] = safeExpr(nl[i], init)
	}
	for i1 := range nr {
		nr[i1] = safeExpr(nr[i1], init)
	}

	var nn []*ir.AssignStmt
	i := 0
	for ; i < len(nl); i++ {
		if i >= len(nr) {
			break
		}
		// Do not generate 'x = x' during return. See issue 4014.
		if op == ir.ORETURN && ir.SameSafeExpr(nl[i], nr[i]) {
			continue
		}
		nn = append(nn, ascompatee1(nl[i], nr[i], init))
	}

	// cannot happen: caller checked that lists had same length
	if i < len(nl) || i < len(nr) {
		var nln, nrn ir.Nodes
		nln.Set(nl)
		nrn.Set(nr)
		base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(ir.CurFunc))
	}
	return reorder3(nn)
}

func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt {
	// convas will turn map assigns into function calls,
	// making it impossible for reorder3 to work.
	n := ir.NewAssignStmt(base.Pos, l, r)

	if l.Op() == ir.OINDEXMAP {
		return n
	}

	return convas(n, init)
}

// check assign type list to
// an expression list. called in
//	expr-list = func()
func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
	if len(nl) != nr.NumFields() {
		base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields())
	}

	var nn, mm ir.Nodes
	for i, l := range nl {
		if ir.IsBlank(l) {
			continue
		}
		r := nr.Field(i)

		// Any assignment to an lvalue that might cause a function call must be
		// deferred until all the returned values have been read.
		if fncall(l, r.Type) {
			tmp := ir.Node(typecheck.Temp(r.Type))
			tmp = typecheck.Expr(tmp)
			a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm)
			mm.Append(a)
			l = tmp
		}

		res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH)
		res.Offset = base.Ctxt.FixedFrameSize() + r.Offset
		res.SetType(r.Type)
		res.SetTypecheck(1)

		a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn)
		updateHasCall(a)
		if a.HasCall() {
			ir.Dump("ascompatet ucount", a)
			base.Fatalf("ascompatet: too many function calls evaluating parameters")
		}

		nn.Append(a)
	}
	return append(nn, mm...)
}

// reorder3
// from ascompatee
//	a,b = c,d
// simultaneous assignment. there cannot
// be later use of an earlier lvalue.
//
// function calls have been removed.
func reorder3(all []*ir.AssignStmt) []ir.Node {
	// If a needed expression may be affected by an
	// earlier assignment, make an early copy of that
	// expression and use the copy instead.
	var early []ir.Node

	var mapinit ir.Nodes
	for i, n := range all {
		l := n.X

		// Save subexpressions needed on left side.
		// Drill through non-dereferences.
		for {
			switch ll := l; ll.Op() {
			case ir.ODOT:
				ll := ll.(*ir.SelectorExpr)
				l = ll.X
				continue
			case ir.OPAREN:
				ll := ll.(*ir.ParenExpr)
				l = ll.X
				continue
			case ir.OINDEX:
				ll := ll.(*ir.IndexExpr)
				if ll.X.Type().IsArray() {
					ll.Index = reorder3save(ll.Index, all, i, &early)
					l = ll.X
					continue
				}
			}
			break
		}

		switch l.Op() {
		default:
			base.Fatalf("reorder3 unexpected lvalue %v", l.Op())

		case ir.ONAME:
			break

		case ir.OINDEX, ir.OINDEXMAP:
			l := l.(*ir.IndexExpr)
			l.X = reorder3save(l.X, all, i, &early)
			l.Index = reorder3save(l.Index, all, i, &early)
			if l.Op() == ir.OINDEXMAP {
				all[i] = convas(all[i], &mapinit)
			}

		case ir.ODEREF:
			l := l.(*ir.StarExpr)
			l.X = reorder3save(l.X, all, i, &early)
		case ir.ODOTPTR:
			l := l.(*ir.SelectorExpr)
			l.X = reorder3save(l.X, all, i, &early)
		}

		// Save expression on right side.
		all[i].Y = reorder3save(all[i].Y, all, i, &early)
	}

	early = append(mapinit, early...)
	for _, as := range all {
		early = append(early, as)
	}
	return early
}

// if the evaluation of *np would be affected by the
// assignments in all up to but not including the ith assignment,
// copy into a temporary during *early and
// replace *np with that temp.
// The result of reorder3save MUST be assigned back to n, e.g.
//	n.Left = reorder3save(n.Left, all, i, early)
func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node {
	if !aliased(n, all[:i]) {
		return n
	}

	q := ir.Node(typecheck.Temp(n.Type()))
	as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, q, n))
	*early = append(*early, as)
	return q
}

// Is it possible that the computation of r might be
// affected by assignments in all?
func aliased(r ir.Node, all []*ir.AssignStmt) bool {
	if r == nil {
		return false
	}

	// Treat all fields of a struct as referring to the whole struct.
	// We could do better but we would have to keep track of the fields.
	for r.Op() == ir.ODOT {
		r = r.(*ir.SelectorExpr).X
	}

	// Look for obvious aliasing: a variable being assigned
	// during the all list and appearing in n.
	// Also record whether there are any writes to addressable
	// memory (either main memory or variables whose addresses
	// have been taken).
	memwrite := false
	for _, as := range all {
		// We can ignore assignments to blank.
		if ir.IsBlank(as.X) {
			continue
		}

		lv := ir.OuterValue(as.X)
		if lv.Op() != ir.ONAME {
			memwrite = true
			continue
		}
		l := lv.(*ir.Name)

		switch l.Class_ {
		default:
			base.Fatalf("unexpected class: %v, %v", l, l.Class_)

		case ir.PAUTOHEAP, ir.PEXTERN:
			memwrite = true
			continue

		case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
			if l.Name().Addrtaken() {
				memwrite = true
				continue
			}

			if refersToName(l, r) {
				// Direct hit: l appears in r.
				return true
			}
		}
	}

	// The variables being written do not appear in r.
	// However, r might refer to computed addresses
	// that are being written.

	// If no computed addresses are affected by the writes, no aliasing.
	if !memwrite {
		return false
	}

	// If r does not refer to any variables whose addresses have been taken,
	// then the only possible writes to r would be directly to the variables,
	// and we checked those above, so no aliasing problems.
	if !anyAddrTaken(r) {
		return false
	}

	// Otherwise, both the writes and r refer to computed memory addresses.
	// Assume that they might conflict.
	return true
}

// anyAddrTaken reports whether the evaluation n,
// which appears on the left side of an assignment,
// may refer to variables whose addresses have been taken.
func anyAddrTaken(n ir.Node) bool {
	return ir.Any(n, func(n ir.Node) bool {
		switch n.Op() {
		case ir.ONAME:
			n := n.(*ir.Name)
			return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Name().Addrtaken()

		case ir.ODOT: // but not ODOTPTR - should have been handled in aliased.
			base.Fatalf("anyAddrTaken unexpected ODOT")

		case ir.OADD,
			ir.OAND,
			ir.OANDAND,
			ir.OANDNOT,
			ir.OBITNOT,
			ir.OCONV,
			ir.OCONVIFACE,
			ir.OCONVNOP,
			ir.ODIV,
			ir.ODOTTYPE,
			ir.OLITERAL,
			ir.OLSH,
			ir.OMOD,
			ir.OMUL,
			ir.ONEG,
			ir.ONIL,
			ir.OOR,
			ir.OOROR,
			ir.OPAREN,
			ir.OPLUS,
			ir.ORSH,
			ir.OSUB,
			ir.OXOR:
			return false
		}
		// Be conservative.
		return true
	})
}

// refersToName reports whether r refers to name.
func refersToName(name *ir.Name, r ir.Node) bool {
	return ir.Any(r, func(r ir.Node) bool {
		return r.Op() == ir.ONAME && r == name
	})
}

// refersToCommonName reports whether any name
// appears in common between l and r.
// This is called from sinit.go.
func refersToCommonName(l ir.Node, r ir.Node) bool {
	if l == nil || r == nil {
		return false
	}

	// This could be written elegantly as a Find nested inside a Find:
	//
	//	found := ir.Find(l, func(l ir.Node) interface{} {
	//		if l.Op() == ir.ONAME {
	//			return ir.Find(r, func(r ir.Node) interface{} {
	//				if r.Op() == ir.ONAME && l.Name() == r.Name() {
	//					return r
	//				}
	//				return nil
	//			})
	//		}
	//		return nil
	//	})
	//	return found != nil
	//
	// But that would allocate a new closure for the inner Find
	// for each name found on the left side.
	// It may not matter at all, but the below way of writing it
	// only allocates two closures, not O(|L|) closures.
	var doL, doR func(ir.Node) error
	var targetL *ir.Name
	doR = func(r ir.Node) error {
		if r.Op() == ir.ONAME && r.Name() == targetL {
			return stop
		}
		return ir.DoChildren(r, doR)
	}
	doL = func(l ir.Node) error {
		if l.Op() == ir.ONAME {
			l := l.(*ir.Name)
			targetL = l.Name()
			if doR(r) == stop {
				return stop
			}
		}
		return ir.DoChildren(l, doL)
	}
	return doL(l) == stop
}

// expand append(l1, l2...) to
//   init {
//     s := l1
//     n := len(s) + len(l2)
//     // Compare as uint so growslice can panic on overflow.
//     if uint(n) > uint(cap(s)) {
//       s = growslice(s, n)
//     }
//     s = s[:n]
//     memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
//   }
//   s
//
// l2 is allowed to be a string.
func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
	walkAppendArgs(n, init)

	l1 := n.Args[0]
	l2 := n.Args[1]
	l2 = cheapExpr(l2, init)
	n.Args[1] = l2

	var nodes ir.Nodes

	// var s []T
	s := typecheck.Temp(l1.Type())
	nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1

	elemtype := s.Type().Elem()

	// n := len(s) + len(l2)
	nn := typecheck.Temp(types.Types[types.TINT])
	nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2))))

	// if uint(n) > uint(cap(s))
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
	nuint := typecheck.Conv(nn, types.Types[types.TUINT])
	scapuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint)

	// instantiate growslice(typ *type, []any, int) []any
	fn := typecheck.LookupRuntime("growslice")
	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)

	// s = growslice(T, s, n)
	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
	nodes.Append(nif)

	// s = s[:n]
	nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
	nt.SetSliceBounds(nil, nn, nil)
	nt.SetBounded(true)
	nodes.Append(ir.NewAssignStmt(base.Pos, s, nt))

	var ncopy ir.Node
	if elemtype.HasPointers() {
		// copy(s[len(l1):], l2)
		slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
		slice.SetType(s.Type())
		slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)

		ir.CurFunc.SetWBPos(n.Pos())

		// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
		fn := typecheck.LookupRuntime("typedslicecopy")
		fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
		ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
		ptr2, len2 := backingArrayPtrLen(l2)
		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2)
	} else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
		// rely on runtime to instrument:
		//	copy(s[len(l1):], l2)
		// l2 can be a slice or string.
		slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
		slice.SetType(s.Type())
		slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)

		ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
		ptr2, len2 := backingArrayPtrLen(l2)

		fn := typecheck.LookupRuntime("slicecopy")
		fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Width))
	} else {
		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
		ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
		ix.SetBounded(true)
		addr := typecheck.NodAddr(ix)

		sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2)

		nwid := cheapExpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes)
		nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Width))

		// instantiate func memmove(to *any, frm *any, length uintptr)
		fn := typecheck.LookupRuntime("memmove")
		fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
		ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
	}
	ln := append(nodes, ncopy)

	typecheck.Stmts(ln)
	walkStmtList(ln)
	init.Append(ln...)
	return s
}
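A standalone illustration of the forms appendSlice handles: appending a whole slice, plus the string case mentioned in walkAssign above:

package main

import "fmt"

func main() {
	a := []byte{'a'}
	a = append(a, []byte{'b', 'c'}...) // append(slice, slice...)
	a = append(a, "de"...)             // append(slice, string...) takes the same path
	fmt.Println(string(a))             // abcde
}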
// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
func isAppendOfMake(n ir.Node) bool {
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
		return false
	}

	if n.Typecheck() == 0 {
		base.Fatalf("missing typecheck: %+v", n)
	}

	if n.Op() != ir.OAPPEND {
		return false
	}
	call := n.(*ir.CallExpr)
	if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE {
		return false
	}

	mk := call.Args[1].(*ir.MakeExpr)
	if mk.Cap != nil {
		return false
	}

	// y must be either an integer constant or the largest possible positive value
	// of variable y needs to fit into an uint.

	// typecheck made sure that constant arguments to make are not negative and fit into an int.

	// The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime.
	y := mk.Len
	if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
		return false
	}

	return true
}

// extendSlice rewrites append(l1, make([]T, l2)...) to
//   init {
//     if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
//     } else {
//       panicmakeslicelen()
//     }
//     s := l1
//     n := len(s) + l2
//     // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
//     // cap is a positive int and n can become negative when len(s) + l2
//     // overflows int. Interpreting n when negative as uint makes it larger
//     // than cap(s). growslice will check the int n arg and panic if n is
//     // negative. This prevents the overflow from being undetected.
//     if uint(n) > uint(cap(s)) {
//       s = growslice(T, s, n)
//     }
//     s = s[:n]
//     lptr := &l1[0]
//     sptr := &s[0]
//     if lptr == sptr || !T.HasPointers() {
//       // growslice did not clear the whole underlying array (or did not get called)
//       hp := &s[len(l1)]
//       hn := l2 * sizeof(T)
//       memclr(hp, hn)
//     }
//   }
//   s
func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
	// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
	// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
	// check of l2 < 0 at runtime which is generated below.
	l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT])
	l2 = typecheck.Expr(l2)
	n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second().

	walkAppendArgs(n, init)

	l1 := n.Args[0]
	l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs

	var nodes []ir.Node

	// if l2 >= 0 (likely happens), do nothing
	nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(0)), nil, nil)
	nifneg.Likely = true

	// else panicmakeslicelen()
	nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
	nodes = append(nodes, nifneg)

	// s := l1
	s := typecheck.Temp(l1.Type())
	nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))

	elemtype := s.Type().Elem()

	// n := len(s) + l2
	nn := typecheck.Temp(types.Types[types.TINT])
	nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))

	// if uint(n) > uint(cap(s))
	nuint := typecheck.Conv(nn, types.Types[types.TUINT])
	capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
	nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil)

	// instantiate growslice(typ *type, old []any, newcap int) []any
	fn := typecheck.LookupRuntime("growslice")
	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)

	// s = growslice(T, s, n)
	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
	nodes = append(nodes, nif)

	// s = s[:n]
	nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
	nt.SetSliceBounds(nil, nn, nil)
	nt.SetBounded(true)
	nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt))

	// lptr := &l1[0]
	l1ptr := typecheck.Temp(l1.Type().Elem().PtrTo())
	tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1)
	nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp))

	// sptr := &s[0]
	sptr := typecheck.Temp(elemtype.PtrTo())
	tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
	nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp))

	// hp := &s[len(l1)]
	ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
	ix.SetBounded(true)
	hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])

	// hn := l2 * sizeof(elem(s))
	hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR])

	clrname := "memclrNoHeapPointers"
	hasPointers := elemtype.HasPointers()
	if hasPointers {
		clrname = "memclrHasPointers"
		ir.CurFunc.SetWBPos(n.Pos())
	}

	var clr ir.Nodes
	clrfn := mkcall(clrname, nil, &clr, hp, hn)
	clr.Append(clrfn)

	if hasPointers {
		// if l1ptr == sptr
		nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil)
		nifclr.Body = clr
		nodes = append(nodes, nifclr)
	} else {
		nodes = append(nodes, clr...)
	}

	typecheck.Stmts(nodes)
	walkStmtList(nodes)
	init.Append(nodes...)
	return s
}
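The pattern isAppendOfMake matches and extendSlice rewrites, in ordinary source form:

package main

import "fmt"

func main() {
	s := []int{1, 2}
	// Rewritten to grow s once and clear only the new tail,
	// instead of materializing the temporary make([]int, 3) slice.
	s = append(s, make([]int, 3)...)
	fmt.Println(s) // [1 2 0 0 0]
}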

src/cmd/compile/internal/walk/builtin.go (new file)

@@ -0,0 +1,699 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package walk

import (
	"fmt"
	"go/constant"
	"go/token"
	"strings"

	"cmd/compile/internal/base"
	"cmd/compile/internal/escape"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/reflectdata"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
)

// Rewrite append(src, x, y, z) so that any side effects in
// x, y, z (including runtime panics) are evaluated in
// initialization statements before the append.
// For normal code generation, stop there and leave the
// rest to cgen_append.
//
// For race detector, expand append(src, a [, b]* ) to
//
//   init {
//     s := src
//     const argc = len(args) - 1
//     if cap(s) - len(s) < argc {
//       s = growslice(s, len(s)+argc)
//     }
//     n := len(s)
//     s = s[:n+argc]
//     s[n] = a
//     s[n+1] = b
//     ...
//   }
//   s
func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
	if !ir.SameSafeExpr(dst, n.Args[0]) {
		n.Args[0] = safeExpr(n.Args[0], init)
		n.Args[0] = walkExpr(n.Args[0], init)
	}
	walkExprListSafe(n.Args[1:], init)

	nsrc := n.Args[0]

	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
	// and n are name or literal, but those may index the slice we're
	// modifying here. Fix explicitly.
	// Using cheapexpr also makes sure that the evaluation
	// of all arguments (and especially any panics) happen
	// before we begin to modify the slice in a visible way.
	ls := n.Args[1:]
	for i, n := range ls {
		n = cheapExpr(n, init)
		if !types.Identical(n.Type(), nsrc.Type().Elem()) {
			n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
			n = walkExpr(n, init)
		}
		ls[i] = n
	}

	argc := len(n.Args) - 1
	if argc < 1 {
		return nsrc
	}

	// General case, with no function calls left as arguments.
	// Leave for gen, except that instrumentation requires old form.
	if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
		return n
	}

	var l []ir.Node

	ns := typecheck.Temp(nsrc.Type())
	l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src

	na := ir.NewInt(int64(argc))                 // const argc
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)

	fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
	fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())

	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
		ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}

	l = append(l, nif)

	nn := typecheck.Temp(types.Types[types.TINT])
	l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)

	slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns) // ...s[:n+argc]
	slice.SetSliceBounds(nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil)
	slice.SetBounded(true)
	l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]

	ls = n.Args[1:]
	for i, n := range ls {
		ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
		ix.SetBounded(true)
		l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg
		if i+1 < len(ls) {
			l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1
		}
	}

	typecheck.Stmts(l)
	walkStmtList(l)
	init.Append(l...)
	return ns
}

// walkClose walks an OCLOSE node.
func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
	// cannot use chanfn - closechan takes any, not chan any
	fn := typecheck.LookupRuntime("closechan")
	fn = typecheck.SubstArgTypes(fn, n.X.Type())
	return mkcall1(fn, nil, init, n.X)
}

// Lower copy(a, b) to a memmove call or a runtime call.
//
// init {
//   n := len(a)
//   if n > len(b) { n = len(b) }
//   if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
// }
// n;
//
// Also works if b is a string.
func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
	if n.X.Type().Elem().HasPointers() {
		ir.CurFunc.SetWBPos(n.Pos())
		fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
		n.X = cheapExpr(n.X, init)
		ptrL, lenL := backingArrayPtrLen(n.X)
		n.Y = cheapExpr(n.Y, init)
		ptrR, lenR := backingArrayPtrLen(n.Y)
		return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
	}

	if runtimecall {
		// rely on runtime to instrument:
		//	copy(n.Left, n.Right)
		// n.Right can be a slice or string.

		n.X = cheapExpr(n.X, init)
		ptrL, lenL := backingArrayPtrLen(n.X)
		n.Y = cheapExpr(n.Y, init)
		ptrR, lenR := backingArrayPtrLen(n.Y)

		fn := typecheck.LookupRuntime("slicecopy")
		fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())

		return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width))
	}

	n.X = walkExpr(n.X, init)
	n.Y = walkExpr(n.Y, init)
	nl := typecheck.Temp(n.X.Type())
	nr := typecheck.Temp(n.Y.Type())
	var l []ir.Node
	l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
	l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))

	nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
	nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)

	nlen := typecheck.Temp(types.Types[types.TINT])

	// n = len(to)
	l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))

	// if n > len(frm) { n = len(frm) }
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)

	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
	nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
	l = append(l, nif)

	// if to.ptr != frm.ptr { memmove( ... ) }
	ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
	ne.Likely = true
	l = append(l, ne)

	fn := typecheck.LookupRuntime("memmove")
	fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
	nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
	setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
	ne.Body.Append(setwid)
	nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width))
	call := mkcall1(fn, nil, init, nto, nfrm, nwid)
	ne.Body.Append(call)

	typecheck.Stmts(l)
	walkStmtList(l)
	init.Append(l...)
	return nlen
}
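The semantics walkCopy preserves, as a runnable sketch: copy moves min(len(dst), len(src)) elements and accepts a string source:

package main

import "fmt"

func main() {
	dst := make([]byte, 3)
	n := copy(dst, "hello")     // only 3 bytes fit
	fmt.Println(n, string(dst)) // 3 hel
}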
// walkDelete walks an ODELETE node.
func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
	init.Append(n.PtrInit().Take()...)
	map_ := n.Args[0]
	key := n.Args[1]
	map_ = walkExpr(map_, init)
	key = walkExpr(key, init)

	t := map_.Type()
	fast := mapfast(t)
	if fast == mapslow {
		// order.stmt made sure key is addressable.
		key = typecheck.NodAddr(key)
	}
	return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
}

// walkLenCap walks an OLEN or OCAP node.
func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
	if isRuneCount(n) {
		// Replace len([]rune(string)) with runtime.countrunes(string).
		return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
	}

	n.X = walkExpr(n.X, init)

	// replace len(*[10]int) with 10.
	// delayed until now to preserve side effects.
	t := n.X.Type()

	if t.IsPtr() {
		t = t.Elem()
	}
	if t.IsArray() {
		safeExpr(n.X, init)
		con := typecheck.OrigInt(n, t.NumElem())
		con.SetTypecheck(1)
		return con
	}
	return n
}

// walkMakeChan walks an OMAKECHAN node.
func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	// When size fits into int, use makechan instead of
	// makechan64, which is faster and shorter on 32 bit platforms.
	size := n.Len
	fnname := "makechan64"
	argtype := types.Types[types.TINT64]

	// Type checking guarantees that TIDEAL size is positive and fits in an int.
	// The case of size overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makechan during runtime.
	if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "makechan"
		argtype = types.Types[types.TINT]
	}

	return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
}

// walkMakeMap walks an OMAKEMAP node.
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	t := n.Type()
	hmapType := reflectdata.MapType(t)
	hint := n.Len

	// var h *hmap
	var h ir.Node
	if n.Esc() == ir.EscNone {
		// Allocate hmap on stack.

		// var hv hmap
		hv := typecheck.Temp(hmapType)
		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil)))
		// h = &hv
		h = typecheck.NodAddr(hv)

		// Allocate one bucket pointed to by hmap.buckets on stack if hint
		// is not larger than BUCKETSIZE. In case hint is larger than
		// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
		// Maximum key and elem size is 128 bytes, larger objects
		// are stored with an indirection. So max bucket size is 2048+eps.
		if !ir.IsConst(hint, constant.Int) ||
			constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {

			// In case hint is larger than BUCKETSIZE runtime.makemap
			// will allocate the buckets on the heap, see #20184
			//
			// if hint <= BUCKETSIZE {
			//     var bv bmap
			//     b = &bv
			//     h.buckets = b
			// }
			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
			nif.Likely = true

			// var bv bmap
			bv := typecheck.Temp(reflectdata.MapBucketType(t))
			nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil))

			// b = &bv
			b := typecheck.NodAddr(bv)

			// h.buckets = b
			bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
			nif.Body.Append(na)
			appendWalkStmt(init, nif)
		}
	}

	if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
		// Handling make(map[any]any) and
		// make(map[any]any, hint) where hint <= BUCKETSIZE
		// special allows for faster map initialization and
		// improves binary size by using calls with fewer arguments.
		// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
		// and no buckets will be allocated by makemap. Therefore,
		// no buckets need to be allocated in this code path.
		if n.Esc() == ir.EscNone {
			// Only need to initialize h.hash0 since
			// hmap h has been allocated on the stack already.
			// h.hash0 = fastrand()
			rand := mkcall("fastrand", types.Types[types.TUINT32], init)
			hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
			return typecheck.ConvNop(h, t)
		}
		// Call runtime.makehmap to allocate an
		// hmap on the heap and initialize hmap's hash0 field.
		fn := typecheck.LookupRuntime("makemap_small")
		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
		return mkcall1(fn, n.Type(), init)
	}

	if n.Esc() != ir.EscNone {
		h = typecheck.NodNil()
	}
	// Map initialization with a variable or large hint is
	// more complicated. We therefore generate a call to
	// runtime.makemap to initialize hmap and allocate the
	// map buckets.

	// When hint fits into int, use makemap instead of
	// makemap64, which is faster and shorter on 32 bit platforms.
	fnname := "makemap64"
	argtype := types.Types[types.TINT64]

	// Type checking guarantees that TIDEAL hint is positive and fits in an int.
	// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
	// The case of hint overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makemap during runtime.
	if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "makemap"
		argtype = types.Types[types.TINT]
	}

	fn := typecheck.LookupRuntime(fnname)
	fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
	return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
}

// walkMakeSlice walks an OMAKESLICE node.
func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	l := n.Len
	r := n.Cap
	if r == nil {
		r = safeExpr(l, init)
		l = r
	}
	t := n.Type()
	if t.Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
	}
	if n.Esc() == ir.EscNone {
		if why := escape.HeapAllocReason(n); why != "" {
			base.Fatalf("%v has EscNone, but %v", n, why)
		}
		// var arr [r]T
		// n = arr[:l]
		i := typecheck.IndexConst(r)
		if i < 0 {
			base.Fatalf("walkexpr: invalid index %v", r)
		}

		// cap is constrained to [0,2^31) or [0,2^63) depending on whether
		// we're in 32-bit or 64-bit systems. So it's safe to do:
		//
		// if uint64(len) > cap {
		//     if len < 0 { panicmakeslicelen() }
		//     panicmakeslicecap()
		// }
		nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil)
		niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil)
		niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
		nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
		init.Append(typecheck.Stmt(nif))

		t = types.NewArray(t.Elem(), i) // [r]T
		var_ := typecheck.Temp(t)
		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp
		r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_)            // arr[:l]
		r.SetSliceBounds(nil, l, nil)
		// The conv is necessary in case n.Type is named.
		return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init)
	}

	// n escapes; set up a call to makeslice.
	// When len and cap can fit into int, use makeslice instead of
	// makeslice64, which is faster and shorter on 32 bit platforms.

	len, cap := l, r

	fnname := "makeslice64"
	argtype := types.Types[types.TINT64]

	// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
	// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makeslice during runtime.
	if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
		(cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
		fnname = "makeslice"
		argtype = types.Types[types.TINT]
	}

	m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
	m.SetType(t)

	fn := typecheck.LookupRuntime(fnname)
	m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
	m.Ptr.MarkNonNil()
	m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])}
	return walkExpr(typecheck.Expr(m), init)
}
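A standalone demonstration of the runtime length check that walkMakeSlice wires up for a non-constant length:

package main

import "fmt"

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	n := -1            // non-constant, so the check happens at run time
	_ = make([]int, n) // panics: makeslice: len out of range
}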
// walkMakeSliceCopy walks an OMAKESLICECOPY node.
func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	if n.Esc() == ir.EscNone {
		base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
	}

	t := n.Type()
	if t.Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
	}

	length := typecheck.Conv(n.Len, types.Types[types.TINT])
	copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
	copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)

	if !t.Elem().HasPointers() && n.Bounded() {
		// When len(to)==len(from) and elements have no pointers:
		// replace make+copy with runtime.mallocgc+runtime.memmove.

		// We do not check for overflow of len(to)*elem.Width here
		// since len(from) is an existing checked slice capacity
		// with same elem.Width for the from slice.
		size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR]))

		// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
		fn := typecheck.LookupRuntime("mallocgc")
		sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
		sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false))
		sh.Ptr.MarkNonNil()
		sh.LenCap = []ir.Node{length, length}
		sh.SetType(t)

		s := typecheck.Temp(t)
		r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
		r = walkExpr(r, init)
		init.Append(r)

		// instantiate memmove(to *any, frm *any, size uintptr)
		fn = typecheck.LookupRuntime("memmove")
		fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
		ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
		init.Append(walkExpr(typecheck.Stmt(ncopy), init))

		return s
	}
	// Replace make+copy with runtime.makeslicecopy.
	// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
	fn := typecheck.LookupRuntime("makeslicecopy")
	s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
	s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
	s.Ptr.MarkNonNil()
	s.LenCap = []ir.Node{length, length}
	s.SetType(t)
	return walkExpr(typecheck.Expr(s), init)
}
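The source-level make+copy pair that the compiler fuses into a single OMAKESLICECOPY node and this function then lowers:

package main

import "fmt"

func main() {
	from := []int{1, 2, 3}
	to := make([]int, len(from)) // make+copy pair, fused by the compiler
	copy(to, from)
	fmt.Println(to) // [1 2 3]
}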
// walkNew walks an ONEW node.
func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
	if n.Type().Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
	}
	if n.Esc() == ir.EscNone {
		if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
			base.Fatalf("large ONEW with EscNone: %v", n)
		}
		r := typecheck.Temp(n.Type().Elem())
		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp
		return typecheck.Expr(typecheck.NodAddr(r))
	}
	return callnew(n.Type().Elem())
}

// generate code for print
func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
	// Hoist all the argument evaluation up before the lock.
	walkExprListCheap(nn.Args, init)

	// For println, add " " between elements and "\n" at the end.
	if nn.Op() == ir.OPRINTN {
		s := nn.Args
		t := make([]ir.Node, 0, len(s)*2)
		for i, n := range s {
			if i != 0 {
				t = append(t, ir.NewString(" "))
			}
			t = append(t, n)
		}
		t = append(t, ir.NewString("\n"))
		nn.Args.Set(t)
	}

	// Collapse runs of constant strings.
	s := nn.Args
	t := make([]ir.Node, 0, len(s))
	for i := 0; i < len(s); {
		var strs []string
		for i < len(s) && ir.IsConst(s[i], constant.String) {
			strs = append(strs, ir.StringVal(s[i]))
			i++
		}
		if len(strs) > 0 {
			t = append(t, ir.NewString(strings.Join(strs, "")))
		}
		if i < len(s) {
			t = append(t, s[i])
			i++
		}
	}
	nn.Args.Set(t)

	calls := []ir.Node{mkcall("printlock", nil, init)}
	for i, n := range nn.Args {
		if n.Op() == ir.OLITERAL {
			if n.Type() == types.UntypedRune {
				n = typecheck.DefaultLit(n, types.RuneType)
			}

			switch n.Val().Kind() {
			case constant.Int:
				n = typecheck.DefaultLit(n, types.Types[types.TINT64])

			case constant.Float:
				n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
			}
		}

		if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
			n = typecheck.DefaultLit(n, types.Types[types.TINT64])
		}
		n = typecheck.DefaultLit(n, nil)
		nn.Args[i] = n
		if n.Type() == nil || n.Type().Kind() == types.TFORW {
			continue
		}

		var on *ir.Name
		switch n.Type().Kind() {
		case types.TINTER:
			if n.Type().IsEmptyInterface() {
				on = typecheck.LookupRuntime("printeface")
			} else {
				on = typecheck.LookupRuntime("printiface")
			}
			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
		case types.TPTR:
			if n.Type().Elem().NotInHeap() {
				on = typecheck.LookupRuntime("printuintptr")
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(types.Types[types.TUNSAFEPTR])
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(types.Types[types.TUINTPTR])
				break
			}
			fallthrough
		case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
			on = typecheck.LookupRuntime("printpointer")
			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
		case types.TSLICE:
			on = typecheck.LookupRuntime("printslice")
			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
		case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
			if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
				on = typecheck.LookupRuntime("printhex")
			} else {
				on = typecheck.LookupRuntime("printuint")
			}
		case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
			on = typecheck.LookupRuntime("printint")
		case types.TFLOAT32, types.TFLOAT64:
			on = typecheck.LookupRuntime("printfloat")
		case types.TCOMPLEX64, types.TCOMPLEX128:
			on = typecheck.LookupRuntime("printcomplex")
		case types.TBOOL:
			on = typecheck.LookupRuntime("printbool")
		case types.TSTRING:
			cs := ""
			if ir.IsConst(n, constant.String) {
				cs = ir.StringVal(n)
			}
			switch cs {
			case " ":
				on = typecheck.LookupRuntime("printsp")
			case "\n":
				on = typecheck.LookupRuntime("printnl")
			default:
				on = typecheck.LookupRuntime("printstring")
			}
		default:
			badtype(ir.OPRINT, n.Type(), nil)
			continue
		}

		r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
		if params := on.Type().Params().FieldSlice(); len(params) > 0 {
			t := params[0].Type
			if !types.Identical(t, n.Type()) {
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(t)
			}
			r.Args.Append(n)
		}
		calls = append(calls, r)
	}

	calls = append(calls, mkcall("printunlock", nil, init))

	typecheck.Stmts(calls)
	walkExprList(calls, init)

	r := ir.NewBlockStmt(base.Pos, nil)
	r.List.Set(calls)
	return walkStmt(typecheck.Stmt(r))
}
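The separator rules implemented above, observable from ordinary code (output goes to stderr):

package main

func main() {
	// walkPrint inserts " " between println operands plus a trailing "\n",
	// then collapses adjacent constant strings into one printstring call.
	println("a", 1, true) // prints: a 1 true
	print("a", 1, "\n")   // prints: a1
}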
func badtype(op ir.Op, tl, tr *types.Type) {
	var s string
	if tl != nil {
		s += fmt.Sprintf("\n\t%v", tl)
	}
	if tr != nil {
		s += fmt.Sprintf("\n\t%v", tr)
	}

	// common mistake: *struct and *interface.
	if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
		if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
			s += "\n\t(*struct vs *interface)"
		} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
			s += "\n\t(*interface vs *struct)"
		}
	}

	base.Errorf("illegal types for operand: %v%s", op, s)
}

func callnew(t *types.Type) ir.Node {
	types.CalcSize(t)
	n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t))
	n.SetType(types.NewPtr(t))
	n.SetTypecheck(1)
	n.MarkNonNil()
	return n
}

func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
	fn := typecheck.LookupRuntime(name)
	fn = typecheck.SubstArgTypes(fn, l, r)
	return fn
}

// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
func isRuneCount(n ir.Node) bool {
	return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
}
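The pattern in question, with the byte length shown for contrast:

package main

import "fmt"

func main() {
	s := "héllo"
	// len([]rune(s)) is lowered to countrunes(s); no []rune is allocated.
	fmt.Println(len([]rune(s)), len(s)) // 5 6
}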

View File

@ -115,7 +115,7 @@ func Closure(fn *ir.Func) {
base.Pos = lno
}
func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
fn := clo.Func
// If no closure vars, don't bother wrapping.
@ -148,10 +148,10 @@ func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
clo.Prealloc = nil
}
return walkexpr(cfn, init)
return walkExpr(cfn, init)
}
func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
func walkCallPart(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
@@ -162,8 +162,8 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
if n.X.Type().IsInterface() {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
n.X = cheapexpr(n.X, init)
n.X = walkexpr(n.X, nil)
n.X = cheapExpr(n.X, init)
n.X = walkExpr(n.X, nil)
tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
@@ -193,5 +193,5 @@ func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
n.Prealloc = nil
}
return walkexpr(cfn, init)
return walkExpr(cfn, init)
}


@@ -0,0 +1,507 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package walk
import (
"encoding/binary"
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/sys"
)
// The result of walkCompare MUST be assigned back to n, e.g.
// n.Left = walkCompare(n.Left, init)
func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
return walkCompareInterface(n, init)
}
if n.X.Type().IsString() && n.Y.Type().IsString() {
return walkCompareString(n, init)
}
n.X = walkExpr(n.X, init)
n.Y = walkExpr(n.Y, init)
// Given mixed interface/concrete comparison,
// rewrite into types-equal && data-equal.
// This is efficient, avoids allocations, and avoids runtime calls.
if n.X.Type().IsInterface() != n.Y.Type().IsInterface() {
// Preserve side-effects in case of short-circuiting; see #32187.
l := cheapExpr(n.X, init)
r := cheapExpr(n.Y, init)
// Swap so that l is the interface value and r is the concrete value.
if n.Y.Type().IsInterface() {
l, r = r, l
}
// Handle both == and !=.
eq := n.Op()
andor := ir.OOROR
if eq == ir.OEQ {
andor = ir.OANDAND
}
// Check for types equal.
// For empty interface, this is:
// l.tab == type(r)
// For non-empty interface, this is:
// l.tab != nil && l.tab._type == type(r)
var eqtype ir.Node
tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
rtyp := reflectdata.TypePtr(r.Type())
if l.Type().IsEmptyInterface() {
tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
tab.SetTypecheck(1)
eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
} else {
nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab)
match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
}
// Check for data equal.
eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
// Put it all together.
expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
return finishCompare(n, expr, init)
}
// Must be comparison of array or struct.
// Otherwise back end handles it.
// While we're here, decide whether to
// inline or call an eq alg.
t := n.X.Type()
var inline bool
maxcmpsize := int64(4)
unalignedLoad := canMergeLoads()
if unalignedLoad {
// Keep this low enough to generate less code than a function call.
maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize)
}
switch t.Kind() {
default:
if base.Debug.Libfuzzer != 0 && t.IsInteger() {
n.X = cheapExpr(n.X, init)
n.Y = cheapExpr(n.Y, init)
// If exactly one comparison operand is
// constant, invoke the constcmp functions
// instead, and arrange for the constant
// operand to be the first argument.
l, r := n.X, n.Y
if r.Op() == ir.OLITERAL {
l, r = r, l
}
constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
var fn string
var paramType *types.Type
switch t.Size() {
case 1:
fn = "libfuzzerTraceCmp1"
if constcmp {
fn = "libfuzzerTraceConstCmp1"
}
paramType = types.Types[types.TUINT8]
case 2:
fn = "libfuzzerTraceCmp2"
if constcmp {
fn = "libfuzzerTraceConstCmp2"
}
paramType = types.Types[types.TUINT16]
case 4:
fn = "libfuzzerTraceCmp4"
if constcmp {
fn = "libfuzzerTraceConstCmp4"
}
paramType = types.Types[types.TUINT32]
case 8:
fn = "libfuzzerTraceCmp8"
if constcmp {
fn = "libfuzzerTraceConstCmp8"
}
paramType = types.Types[types.TUINT64]
default:
base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
}
init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
}
return n
case types.TARRAY:
// We can compare several elements at once with 2/4/8 byte integer compares
inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
case types.TSTRUCT:
inline = t.NumComponents(types.IgnoreBlankFields) <= 4
}
cmpl := n.X
for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
cmpl = cmpl.(*ir.ConvExpr).X
}
cmpr := n.Y
for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
cmpr = cmpr.(*ir.ConvExpr).X
}
// We chose not to inline. Call the equality function directly.
if !inline {
// eq algs take pointers; cmpl and cmpr must be addressable
if !ir.IsAssignable(cmpl) || !ir.IsAssignable(cmpr) {
base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
}
fn, needsize := eqFor(t)
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
call.Args.Append(typecheck.NodAddr(cmpl))
call.Args.Append(typecheck.NodAddr(cmpr))
if needsize {
call.Args.Append(ir.NewInt(t.Width))
}
res := ir.Node(call)
if n.Op() != ir.OEQ {
res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res)
}
return finishCompare(n, res, init)
}
// inline: build boolean expression comparing element by element
andor := ir.OANDAND
if n.Op() == ir.ONE {
andor = ir.OOROR
}
var expr ir.Node
compare := func(el, er ir.Node) {
a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
if expr == nil {
expr = a
} else {
expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
}
}
cmpl = safeExpr(cmpl, init)
cmpr = safeExpr(cmpr, init)
if t.IsStruct() {
for _, f := range t.Fields().Slice() {
sym := f.Sym
if sym.IsBlank() {
continue
}
compare(
ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym),
ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym),
)
}
} else {
step := int64(1)
remains := t.NumElem() * t.Elem().Width
combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
for i := int64(0); remains > 0; {
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
convType = types.Types[types.TINT64]
step = 8 / t.Elem().Width
case remains >= 4 && combine32bit:
convType = types.Types[types.TUINT32]
step = 4 / t.Elem().Width
case remains >= 2 && combine16bit:
convType = types.Types[types.TUINT16]
step = 2 / t.Elem().Width
default:
step = 1
}
if step == 1 {
compare(
ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)),
ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)),
)
i++
remains -= t.Elem().Width
} else {
elemType := t.Elem().ToUnsigned()
cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)))
cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned
cmplw = typecheck.Conv(cmplw, convType) // widen
cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)))
cmprw = typecheck.Conv(cmprw, elemType)
cmprw = typecheck.Conv(cmprw, convType)
// For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will generate a single large load.
for offset := int64(1); offset < step; offset++ {
lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset)))
lb = typecheck.Conv(lb, elemType)
lb = typecheck.Conv(lb, convType)
lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Width*offset))
cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb)
rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset)))
rb = typecheck.Conv(rb, elemType)
rb = typecheck.Conv(rb, convType)
rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Width*offset))
cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb)
}
compare(cmplw, cmprw)
i += step
remains -= step * t.Elem().Width
}
}
}
if expr == nil {
expr = ir.NewBool(n.Op() == ir.OEQ)
// We still need to use cmpl and cmpr, in case they contain
// an expression which might panic. See issue 23837.
t := typecheck.Temp(cmpl.Type())
a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpl))
a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpr))
init.Append(a1, a2)
}
return finishCompare(n, expr, init)
}
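// Illustrative sketch (assumed, not part of this CL): for a small array the
// inline path above expands the comparison element by element; with merged
// loads, the four byte compares below collapse into one 32-bit compare.
//
//	func eq4(a, b [4]byte) bool { // hand-written expansion of a == b
//		return a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] == b[3]
//	}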
func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
n.Y = cheapExpr(n.Y, init)
n.X = cheapExpr(n.X, init)
eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y)
var cmp ir.Node
if n.Op() == ir.OEQ {
cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
} else {
eqtab.SetOp(ir.ONE)
cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata))
}
return finishCompare(n, cmp, init)
}
func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
var cs, ncs ir.Node // const string, non-const string
switch {
case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String):
// ignore; will be constant evaluated
case ir.IsConst(n.X, constant.String):
cs = n.X
ncs = n.Y
case ir.IsConst(n.Y, constant.String):
cs = n.Y
ncs = n.X
}
if cs != nil {
cmp := n.Op()
// Our comparison below assumes that the non-constant string
// is on the left hand side, so rewrite "" cmp x to x cmp "".
// See issue 24817.
if ir.IsConst(n.X, constant.String) {
cmp = brrev(cmp)
}
// maxRewriteLen was chosen empirically.
// It is the value that minimizes cmd/go file size
// across most architectures.
// See the commit description for CL 26758 for details.
maxRewriteLen := 6
// Some architectures can load an unaligned byte sequence as one word.
// So we can cover longer strings with the same amount of code.
canCombineLoads := canMergeLoads()
combine64bit := false
if canCombineLoads {
// Keep this low enough to generate less code than a function call.
maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
}
var and ir.Op
switch cmp {
case ir.OEQ:
and = ir.OANDAND
case ir.ONE:
and = ir.OOROR
default:
// Don't do byte-wise comparisons for <, <=, etc.
// They're fairly complicated.
// Length-only checks are ok, though.
maxRewriteLen = 0
}
if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
if len(s) > 0 {
ncs = safeExpr(ncs, init)
}
r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(int64(len(s)))))
remains := len(s)
for i := 0; remains > 0; {
if remains == 1 || !canCombineLoads {
cb := ir.NewInt(int64(s[i]))
ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i)))
r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
remains--
i++
continue
}
var step int
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
convType = types.Types[types.TINT64]
step = 8
case remains >= 4:
convType = types.Types[types.TUINT32]
step = 4
case remains >= 2:
convType = types.Types[types.TUINT16]
step = 2
}
ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType)
csubstr := int64(s[i])
// Calculate large constant from bytes as sequence of shifts and ors.
// Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will combine this into a single large load.
for offset := 1; offset < step; offset++ {
b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType)
b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset)))
ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
csubstr |= int64(s[i+offset]) << uint8(8*offset)
}
csubstrPart := ir.NewInt(csubstr)
// Compare "step" bytes at once.
r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
remains -= step
i += step
}
return finishCompare(n, r, init)
}
}
var r ir.Node
if n.Op() == ir.OEQ || n.Op() == ir.ONE {
// prepare for rewrite below
n.X = cheapExpr(n.X, init)
n.Y = cheapExpr(n.Y, init)
eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
// quick check of len before full compare for == or !=.
// memequal then tests equality up to length len.
if n.Op() == ir.OEQ {
// len(left) == len(right) && memequal(left, right, len)
r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
} else {
// len(left) != len(right) || !memequal(left, right, len)
eqlen.SetOp(ir.ONE)
r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
}
} else {
// Compare via cmpstring(s1, s2), then test the result against 0.
r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0))
}
return finishCompare(n, r, init)
}
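// Illustrative sketch (assumed): equality against a short constant string
// becomes a length check plus byte compares, so s == "ab" behaves like
//
//	func isAB(s string) bool {
//		return len(s) == 2 && s[0] == 'a' && s[1] == 'b' // mergeable into one 16-bit load
//	}
//
// Longer strings and ordered comparisons fall back to memequal/cmpstring.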
// The result of finishCompare MUST be assigned back to n, e.g.
// n.Left = finishCompare(n.Left, r, init)
func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
r = typecheck.Expr(r)
r = typecheck.Conv(r, n.Type())
r = walkExpr(r, init)
return r
}
func eqFor(t *types.Type) (n ir.Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
// is handled by walkCompare.
switch a, _ := types.AlgType(t); a {
case types.AMEM:
n := typecheck.LookupRuntime("memequal")
n = typecheck.SubstArgTypes(n, t, t)
return n, true
case types.ASPECIAL:
sym := reflectdata.TypeSymPrefix(".eq", t)
n := typecheck.NewName(sym)
ir.MarkFunc(n)
n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
}, []*ir.Field{
ir.NewField(base.Pos, nil, nil, types.Types[types.TBOOL]),
}))
return n, false
}
base.Fatalf("eqfor %v", t)
return nil, false
}
// brcom returns !(op).
// For example, brcom(==) is !=.
func brcom(op ir.Op) ir.Op {
switch op {
case ir.OEQ:
return ir.ONE
case ir.ONE:
return ir.OEQ
case ir.OLT:
return ir.OGE
case ir.OGT:
return ir.OLE
case ir.OLE:
return ir.OGT
case ir.OGE:
return ir.OLT
}
base.Fatalf("brcom: no com for %v\n", op)
return op
}
// brrev returns reverse(op).
// For example, brrev(<) is >.
func brrev(op ir.Op) ir.Op {
switch op {
case ir.OEQ:
return ir.OEQ
case ir.ONE:
return ir.ONE
case ir.OLT:
return ir.OGT
case ir.OGT:
return ir.OLT
case ir.OLE:
return ir.OGE
case ir.OGE:
return ir.OLE
}
base.Fatalf("brrev: no rev for %v\n", op)
return op
}
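// Illustrative note: brcom negates a comparison, while brrev swaps its
// operands. Both identities below hold for all a and b:
//
//	!(a < b) == (a >= b) // brcom: OLT -> OGE
//	(a < b) == (b > a)   // brrev: OLT -> OGT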
func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
n = copyExpr(n, n.Type(), init)
}
return typecheck.Conv(n, t)
}
// canMergeLoads reports whether the backend optimization passes for
// the current architecture can combine adjacent loads into a single
// larger, possibly unaligned, load. Note that currently the
// optimizations must be able to handle little endian byte order.
func canMergeLoads() bool {
switch ssagen.Arch.LinkArch.Family {
case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
return true
case sys.PPC64:
// Load combining only supported on ppc64le.
return ssagen.Arch.LinkArch.ByteOrder == binary.LittleEndian
}
return false
}


@@ -7,6 +7,7 @@ package walk
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/staticinit"
"cmd/compile/internal/typecheck"
@@ -14,6 +15,22 @@ import (
"cmd/internal/obj"
)
// walkCompLit walks a composite literal node:
// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr).
func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node {
if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) {
n := n.(*ir.CompLitExpr) // not OPTRLIT
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
vstat := readonlystaticname(n.Type())
fixedlit(inInitFunction, initKindStatic, n, vstat, init)
return typecheck.Expr(vstat)
}
var_ := typecheck.Temp(n.Type())
anylit(n, var_, init)
return var_
}
// initContext is the context in which static data is populated.
// It is either in an init function or in any other function.
// Static data populated in an init function will be written either
@@ -245,7 +262,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node,
genAsStatic(as)
case initKindDynamic, initKindLocalCode:
a = orderStmtInPlace(as, map[string][]*ir.Name{})
a = walkstmt(a)
a = walkStmt(a)
init.Append(a)
default:
base.Fatalf("fixedlit: bad kind %d", kind)
@@ -403,7 +420,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
ir.SetPos(value)
as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, a, value))
as = orderStmtInPlace(as, map[string][]*ir.Name{})
as = walkstmt(as)
as = walkStmt(as)
init.Append(as)
}
@@ -412,7 +429,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
a = typecheck.Stmt(a)
a = orderStmtInPlace(a, map[string][]*ir.Name{})
a = walkstmt(a)
a = walkStmt(a)
init.Append(a)
}


@@ -0,0 +1,502 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package walk
import (
"encoding/binary"
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/sys"
)
// walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node.
func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() {
return n.X
}
if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) {
if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T
return walkCheckPtrAlignment(n, init, nil)
}
if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer
return walkCheckPtrArithmetic(n, init)
}
}
param, result := rtconvfn(n.X.Type(), n.Type())
if param == types.Txxx {
return n
}
fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type())
}
// walkConvInterface walks an OCONVIFACE node.
func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
fromType := n.X.Type()
toType := n.Type()
if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _())
reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym)
}
// typeword generates the type word of the interface value.
typeword := func() ir.Node {
if toType.IsEmptyInterface() {
return reflectdata.TypePtr(fromType)
}
return reflectdata.ITabAddr(fromType, toType)
}
// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
if types.IsDirectIface(fromType) {
l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X)
l.SetType(toType)
l.SetTypecheck(n.Typecheck())
return l
}
if ir.Names.Staticuint64s == nil {
ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s"))
ir.Names.Staticuint64s.Class_ = ir.PEXTERN
// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
// individual bytes.
ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase"))
ir.Names.Zerobase.Class_ = ir.PEXTERN
ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR])
}
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
var value ir.Node
switch {
case fromType.Size() == 0:
// n.Left is zero-sized. Use zerobase.
cheapExpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246.
value = ir.Names.Zerobase
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
// and staticuint64s[n.Left * 8 + 7] on big-endian.
n.X = cheapExpr(n.X, init)
// byteindex widens n.Left so that the multiplication doesn't overflow.
index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3))
if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7))
}
xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index)
xe.SetBounded(true)
value = xe
case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly():
// n.Left is a readonly global; use it directly.
value = n.X
case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024:
// n.Left does not escape. Use a stack temporary initialized to n.Left.
value = typecheck.Temp(fromType)
init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n.X)))
}
if value != nil {
// Value is identical to n.Left.
// Construct the interface directly: {type/itab, &value}.
l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck.Expr(typecheck.NodAddr(value)))
l.SetType(toType)
l.SetTypecheck(n.Typecheck())
return l
}
// Implement interface to empty interface conversion.
// tmp = i.itab
// if tmp != nil {
// tmp = tmp.type
// }
// e = iface{tmp, i.data}
if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
// Evaluate the input interface.
c := typecheck.Temp(fromType)
init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
// Get the itab out of the interface.
tmp := typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, c))))
// Get the type out of the itab.
nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, typecheck.NodNil())), nil, nil)
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))}
init.Append(nif)
// Build the result.
e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
e.SetTypecheck(1)
return e
}
fnname, needsaddr := convFuncName(fromType, toType)
if !needsaddr && !fromType.IsInterface() {
// Use a specialized conversion routine that only returns a data pointer.
// ptr = convT2X(val)
// e = iface{typ/tab, ptr}
fn := typecheck.LookupRuntime(fnname)
types.CalcSize(fromType)
fn = typecheck.SubstArgTypes(fn, fromType)
types.CalcSize(fn.Type())
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
call.Args = []ir.Node{n.X}
e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeExpr(walkExpr(typecheck.Expr(call), init), init))
e.SetType(toType)
e.SetTypecheck(1)
return e
}
var tab ir.Node
if fromType.IsInterface() {
// convI2I
tab = reflectdata.TypePtr(toType)
} else {
// convT2x
tab = typeword()
}
v := n.X
if needsaddr {
// Types of large or unknown size are passed by reference.
// Orderexpr arranged for n.Left to be a temporary for all
// the conversions it could see. Comparison of an interface
// with a non-interface, especially in a switch on interface value
// with non-interface cases, is not visible to order.stmt, so we
// have to fall back on allocating a temp here.
if !ir.IsAssignable(v) {
v = copyExpr(v, v.Type(), init)
}
v = typecheck.NodAddr(v)
}
types.CalcSize(fromType)
fn := typecheck.LookupRuntime(fnname)
fn = typecheck.SubstArgTypes(fn, fromType, toType)
types.CalcSize(fn.Type())
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
call.Args = []ir.Node{tab, v}
return walkExpr(typecheck.Expr(call), init)
}
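// Illustrative sketch (assumed): the cases above give these conversions very
// different costs:
//
//	var p *int
//	var e1 interface{} = p // pointer-shaped: built inline as {typeword, p}
//
//	var a [64]int
//	var e2 interface{} = a // not pointer-shaped; if a escapes, this becomes
//	                       // a runtime convT2E-style call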
// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a := typecheck.NodNil()
if n.Esc() == ir.EscNone {
// Create temporary buffer for string on stack.
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
a = typecheck.NodAddr(typecheck.Temp(t))
}
if n.Op() == ir.ORUNES2STR {
// slicerunetostring(*[32]byte, []rune) string
return mkcall("slicerunetostring", n.Type(), init, a, n.X)
}
// slicebytetostring(*[32]byte, ptr *byte, n int) string
n.X = cheapExpr(n.X, init)
ptr, len := backingArrayPtrLen(n.X)
return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
}
// walkBytesToStringTemp walks an OBYTES2STRTMP node.
func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
if !base.Flag.Cfg.Instrumenting {
// Let the backend handle OBYTES2STRTMP directly
// to avoid a function call to slicebytetostringtmp.
return n
}
// slicebytetostringtmp(ptr *byte, n int) string
n.X = cheapExpr(n.X, init)
ptr, len := backingArrayPtrLen(n.X)
return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
}
// walkRuneToString walks an ORUNESTR node.
func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a := typecheck.NodNil()
if n.Esc() == ir.EscNone {
t := types.NewArray(types.Types[types.TUINT8], 4)
a = typecheck.NodAddr(typecheck.Temp(t))
}
// intstring(*[4]byte, rune)
return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
}
// walkStringToBytes walks an OSTR2BYTES node.
func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
s := n.X
if ir.IsConst(s, constant.String) {
sc := ir.StringVal(s)
// Allocate a [n]byte of the right size.
t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
var a ir.Node
if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
a = typecheck.NodAddr(typecheck.Temp(t))
} else {
a = callnew(t)
}
p := typecheck.Temp(t.PtrTo()) // *[n]byte
init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
// Copy from the static string data to the [n]byte.
if len(sc) > 0 {
as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo())))
appendWalkStmt(init, as)
}
// Slice the [n]byte to a []byte.
slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p)
slice.SetType(n.Type())
slice.SetTypecheck(1)
return walkExpr(slice, init)
}
a := typecheck.NodNil()
if n.Esc() == ir.EscNone {
// Create temporary buffer for slice on stack.
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
a = typecheck.NodAddr(typecheck.Temp(t))
}
// stringtoslicebyte(*[32]byte, string) []byte
return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
}
// walkStringToBytesTemp walks an OSTR2BYTESTMP node.
func walkStringToBytesTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
// []byte(string) conversion that creates a slice
// referring to the actual string bytes.
// This conversion is handled later by the backend and
// is only for use by internal compiler optimizations
// that know that the slice won't be mutated.
// The only such case today is:
// for i, c := range []byte(string)
n.X = walkExpr(n.X, init)
return n
}
// walkStringToRunes walks an OSTR2RUNES node.
func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a := typecheck.NodNil()
if n.Esc() == ir.EscNone {
// Create temporary buffer for slice on stack.
t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
a = typecheck.NodAddr(typecheck.Temp(t))
}
// stringtoslicerune(*[32]rune, string) []rune
return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
}
// convFuncName builds the runtime function name for interface conversion.
// It also reports whether the function expects the data by address.
// Not all names are possible. For example, we never generate convE2E or convE2I.
func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
tkind := to.Tie()
switch from.Tie() {
case 'I':
if tkind == 'I' {
return "convI2I", false
}
case 'T':
switch {
case from.Size() == 2 && from.Align == 2:
return "convT16", false
case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
return "convT32", false
case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
return "convT64", false
}
if sc := from.SoleComponent(); sc != nil {
switch {
case sc.IsString():
return "convTstring", false
case sc.IsSlice():
return "convTslice", false
}
}
switch tkind {
case 'E':
if !from.HasPointers() {
return "convT2Enoptr", true
}
return "convT2E", true
case 'I':
if !from.HasPointers() {
return "convT2Inoptr", true
}
return "convT2I", true
}
}
base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
panic("unreachable")
}
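// Illustrative note (assumed): a few selections under the rules above:
//
//	uint16        -> interface{}: convT16      (passed by value)
//	string        -> interface{}: convTstring  (passed by value)
//	[8]byte       -> interface{}: convT2Enoptr (passed by address; no pointers)
//	io.ReadWriter -> io.Reader:   convI2I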
// rtconvfn returns the parameter and result types that will be used by a
// runtime function to convert from type src to type dst. The runtime function
// name can be derived from the names of the returned types.
//
// If no such function is necessary, it returns (Txxx, Txxx).
func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
if ssagen.Arch.SoftFloat {
return types.Txxx, types.Txxx
}
switch ssagen.Arch.LinkArch.Family {
case sys.ARM, sys.MIPS:
if src.IsFloat() {
switch dst.Kind() {
case types.TINT64, types.TUINT64:
return types.TFLOAT64, dst.Kind()
}
}
if dst.IsFloat() {
switch src.Kind() {
case types.TINT64, types.TUINT64:
return src.Kind(), types.TFLOAT64
}
}
case sys.I386:
if src.IsFloat() {
switch dst.Kind() {
case types.TINT64, types.TUINT64:
return types.TFLOAT64, dst.Kind()
case types.TUINT32, types.TUINT, types.TUINTPTR:
return types.TFLOAT64, types.TUINT32
}
}
if dst.IsFloat() {
switch src.Kind() {
case types.TINT64, types.TUINT64:
return src.Kind(), types.TFLOAT64
case types.TUINT32, types.TUINT, types.TUINTPTR:
return types.TUINT32, types.TFLOAT64
}
}
}
return types.Txxx, types.Txxx
}
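// Illustrative note: on 386, for example, rtconvfn(float64, uint32) returns
// (TFLOAT64, TUINT32), and walkConv derives the helper's name from the two
// kinds:
//
//	fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
//	// "float64" + "to" + "uint32" -> runtime call float64touint32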
// byteindex converts n, which is byte-sized, to an int used to index into an array.
// We cannot use conv, because we allow converting bool to int here,
// which is forbidden in user code.
func byteindex(n ir.Node) ir.Node {
// We cannot convert from bool to int directly.
// While converting from int8 to int is possible, it would yield
// the wrong result for negative values.
// Reinterpreting the value as an unsigned byte solves both cases.
if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
n.SetType(types.Types[types.TUINT8])
n.SetTypecheck(1)
}
n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
n.SetType(types.Types[types.TINT])
n.SetTypecheck(1)
return n
}
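// Illustrative sketch (assumed): byteindex supports the staticuint64s fast
// path in walkConvInterface. For a bool or byte value b, the interface data
// pointer is computed roughly as
//
//	idx := byteindex(b) << 3    // byte offset into the [256]uint64 table
//	data := &staticuint64s[idx] // big-endian adds 7 to reach the low byte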
func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node {
if !n.Type().IsPtr() {
base.Fatalf("expected pointer type: %v", n.Type())
}
elem := n.Type().Elem()
if count != nil {
if !elem.IsArray() {
base.Fatalf("expected array type: %v", elem)
}
elem = elem.Elem()
}
size := elem.Size()
if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) {
return n
}
if count == nil {
count = ir.NewInt(1)
}
n.X = cheapExpr(n.X, init)
init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR])))
return n
}
func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
// Calling cheapExpr(n, init) below leads to a recursive call
// to walkExpr, which leads us back here again. Use n.Opt to
// prevent infinite loops.
if opt := n.Opt(); opt == &walkCheckPtrArithmeticMarker {
return n
} else if opt != nil {
// We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
// there's no guarantee that temporarily replacing it is safe, so just hard fail here.
base.Fatalf("unexpected Opt: %v", opt)
}
n.SetOpt(&walkCheckPtrArithmeticMarker)
defer n.SetOpt(nil)
// TODO(mdempsky): Make stricter. We only need to exempt
// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
switch n.X.Op() {
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return n
}
if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) {
return n
}
// Find original unsafe.Pointer operands involved in this
// arithmetic expression.
//
// "It is valid both to add and to subtract offsets from a
// pointer in this way. It is also valid to use &^ to round
// pointers, usually for alignment."
var originals []ir.Node
var walk func(n ir.Node)
walk = func(n ir.Node) {
switch n.Op() {
case ir.OADD:
n := n.(*ir.BinaryExpr)
walk(n.X)
walk(n.Y)
case ir.OSUB, ir.OANDNOT:
n := n.(*ir.BinaryExpr)
walk(n.X)
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if n.X.Type().IsUnsafePtr() {
n.X = cheapExpr(n.X, init)
originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]))
}
}
}
walk(n.X)
cheap := cheapExpr(n, init)
slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
slice.SetEsc(ir.EscNone)
init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
// TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
// the backing store for multiple calls to checkptrArithmetic.
return cheap
}
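// Illustrative sketch (assumed): under -d=checkptr, pointer arithmetic such as
//
//	q := unsafe.Pointer(uintptr(p) + off)
//
// is rewritten so the result is validated against its original operands,
// roughly checkptrArithmetic(q, []unsafe.Pointer{p}), which panics if q no
// longer points into the same allocation as p.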

File diff suppressed because it is too large.


@@ -32,11 +32,11 @@ func cheapComputableIndex(width int64) bool {
return false
}
// walkrange transforms various forms of ORANGE into
// walkRange transforms various forms of ORANGE into
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
func walkrange(nrange *ir.RangeStmt) ir.Node {
func walkRange(nrange *ir.RangeStmt) ir.Node {
if isMapClear(nrange) {
m := nrange.X
lno := ir.SetPos(m)
@@ -325,7 +325,7 @@ func walkrange(nrange *ir.RangeStmt) ir.Node {
n = ifGuard
}
n = walkstmt(n)
n = walkStmt(n)
base.Pos = lno
return n
@@ -387,7 +387,7 @@ func mapClear(m ir.Node) ir.Node {
fn := typecheck.LookupRuntime("mapclear")
fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m)
return walkstmt(typecheck.Stmt(n))
return walkStmt(typecheck.Stmt(n))
}
// Lower n into runtime·memclr if possible, for
@@ -477,7 +477,7 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
n.Cond = typecheck.Expr(n.Cond)
n.Cond = typecheck.DefaultLit(n.Cond, nil)
typecheck.Stmts(n.Body)
return walkstmt(n)
return walkStmt(n)
}
// addptr returns (*T)(uintptr(p) + n).


@@ -11,7 +11,7 @@ import (
"cmd/compile/internal/types"
)
func walkselect(sel *ir.SelectStmt) {
func walkSelect(sel *ir.SelectStmt) {
lno := ir.SetPos(sel)
if len(sel.Compiled) != 0 {
base.Fatalf("double walkselect")
@@ -20,16 +20,16 @@ func walkselect(sel *ir.SelectStmt) {
init := sel.Init()
sel.PtrInit().Set(nil)
init = append(init, walkselectcases(sel.Cases)...)
init = append(init, walkSelectCases(sel.Cases)...)
sel.Cases = ir.Nodes{}
sel.Compiled.Set(init)
walkstmtlist(sel.Compiled)
walkStmtList(sel.Compiled)
base.Pos = lno
}
func walkselectcases(cases ir.Nodes) []ir.Node {
func walkSelectCases(cases ir.Nodes) []ir.Node {
ncas := len(cases)
sellineno := base.Pos


@@ -0,0 +1,315 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package walk
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
)
// The result of walkStmt MUST be assigned back to n, e.g.
// n.Left = walkStmt(n.Left)
func walkStmt(n ir.Node) ir.Node {
if n == nil {
return n
}
ir.SetPos(n)
walkStmtList(n.Init())
switch n.Op() {
default:
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
base.Errorf("%v is not a top level statement", n.Sym())
} else {
base.Errorf("%v is not a top level statement", n.Op())
}
ir.Dump("nottop", n)
return n
case ir.OAS,
ir.OASOP,
ir.OAS2,
ir.OAS2DOTTYPE,
ir.OAS2RECV,
ir.OAS2FUNC,
ir.OAS2MAPR,
ir.OCLOSE,
ir.OCOPY,
ir.OCALLMETH,
ir.OCALLINTER,
ir.OCALL,
ir.OCALLFUNC,
ir.ODELETE,
ir.OSEND,
ir.OPRINT,
ir.OPRINTN,
ir.OPANIC,
ir.ORECOVER,
ir.OGETG:
if n.Typecheck() == 0 {
base.Fatalf("missing typecheck: %+v", n)
}
init := n.Init()
n.PtrInit().Set(nil)
n = walkExpr(n, &init)
if n.Op() == ir.ONAME {
// copy was rewritten to a statement list and a temp for the length.
// Throw away the temp to avoid plain values as statements.
n = ir.NewBlockStmt(n.Pos(), init)
init.Set(nil)
}
if len(init) > 0 {
switch n.Op() {
case ir.OAS, ir.OAS2, ir.OBLOCK:
n.PtrInit().Prepend(init...)
default:
init.Append(n)
n = ir.NewBlockStmt(n.Pos(), init)
}
}
return n
// special case for a receive where we throw away
// the value received.
case ir.ORECV:
n := n.(*ir.UnaryExpr)
return walkRecv(n)
case ir.OBREAK,
ir.OCONTINUE,
ir.OFALL,
ir.OGOTO,
ir.OLABEL,
ir.ODCLCONST,
ir.ODCLTYPE,
ir.OCHECKNIL,
ir.OVARDEF,
ir.OVARKILL,
ir.OVARLIVE:
return n
case ir.ODCL:
n := n.(*ir.Decl)
return walkDecl(n)
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
walkStmtList(n.List)
return n
case ir.OCASE:
base.Errorf("case statement out of place")
panic("unreachable")
case ir.ODEFER:
n := n.(*ir.GoDeferStmt)
ir.CurFunc.SetHasDefer(true)
ir.CurFunc.NumDefers++
if ir.CurFunc.NumDefers > maxOpenDefers {
// Don't allow open-coded defers if there are more than
// 8 defers in the function, since we use a single
// byte to record active defers.
ir.CurFunc.SetOpenCodedDeferDisallowed(true)
}
if n.Esc() != ir.EscNever {
// If n.Esc is not EscNever, then this defer occurs in a loop,
// so open-coded defers cannot be used in this function.
ir.CurFunc.SetOpenCodedDeferDisallowed(true)
}
fallthrough
case ir.OGO:
n := n.(*ir.GoDeferStmt)
return walkGoDefer(n)
case ir.OFOR, ir.OFORUNTIL:
n := n.(*ir.ForStmt)
return walkFor(n)
case ir.OIF:
n := n.(*ir.IfStmt)
return walkIf(n)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
return walkReturn(n)
case ir.ORETJMP:
n := n.(*ir.BranchStmt)
return n
case ir.OINLMARK:
n := n.(*ir.InlineMarkStmt)
return n
case ir.OSELECT:
n := n.(*ir.SelectStmt)
walkSelect(n)
return n
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
walkSwitch(n)
return n
case ir.ORANGE:
n := n.(*ir.RangeStmt)
return walkRange(n)
}
// No return! Each case must return (or panic),
// to avoid confusion about what gets returned
// in the presence of type assertions.
}
func walkStmtList(s []ir.Node) {
for i := range s {
s[i] = walkStmt(s[i])
}
}
// walkDecl walks an ODCL node.
func walkDecl(n *ir.Decl) ir.Node {
v := n.X.(*ir.Name)
if v.Class_ == ir.PAUTOHEAP {
if base.Flag.CompilingRuntime {
base.Errorf("%v escapes to heap, not allowed in runtime", v)
}
nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type()))
nn.Def = true
return walkStmt(typecheck.Stmt(nn))
}
return n
}
// walkFor walks an OFOR or OFORUNTIL node.
func walkFor(n *ir.ForStmt) ir.Node {
if n.Cond != nil {
walkStmtList(n.Cond.Init())
init := n.Cond.Init()
n.Cond.PtrInit().Set(nil)
n.Cond = walkExpr(n.Cond, &init)
n.Cond = ir.InitExpr(init, n.Cond)
}
n.Post = walkStmt(n.Post)
if n.Op() == ir.OFORUNTIL {
walkStmtList(n.Late)
}
walkStmtList(n.Body)
return n
}
// walkGoDefer walks an OGO or ODEFER node.
func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
var init ir.Nodes
switch call := n.Call; call.Op() {
case ir.OPRINT, ir.OPRINTN:
call := call.(*ir.CallExpr)
n.Call = wrapCall(call, &init)
case ir.ODELETE:
call := call.(*ir.CallExpr)
if mapfast(call.Args[0].Type()) == mapslow {
n.Call = wrapCall(call, &init)
} else {
n.Call = walkExpr(call, &init)
}
case ir.OCOPY:
call := call.(*ir.BinaryExpr)
n.Call = walkCopy(call, &init, true)
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
call := call.(*ir.CallExpr)
if len(call.Body) > 0 {
n.Call = wrapCall(call, &init)
} else {
n.Call = walkExpr(call, &init)
}
default:
n.Call = walkExpr(call, &init)
}
if len(init) > 0 {
init.Append(n)
return ir.NewBlockStmt(n.Pos(), init)
}
return n
}
// walkIf walks an OIF node.
func walkIf(n *ir.IfStmt) ir.Node {
n.Cond = walkExpr(n.Cond, n.PtrInit())
walkStmtList(n.Body)
walkStmtList(n.Else)
return n
}
// The result of wrapCall MUST be assigned back to n, e.g.
// n.Left = wrapCall(n.Left, init)
func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
if len(n.Init()) != 0 {
walkStmtList(n.Init())
init.Append(n.PtrInit().Take()...)
}
isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
if !isBuiltinCall && n.IsDDD {
last := len(n.Args) - 1
if va := n.Args[last]; va.Op() == ir.OSLICELIT {
va := va.(*ir.CompLitExpr)
n.Args.Set(append(n.Args[:last], va.List...))
n.IsDDD = false
}
}
// origArgs records which arguments are unsafe.Pointer-to-uintptr conversions.
origArgs := make([]ir.Node, len(n.Args))
var funcArgs []*ir.Field
for i, arg := range n.Args {
s := typecheck.LookupNum("a", i)
if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
origArgs[i] = arg
arg = arg.(*ir.ConvExpr).X
n.Args[i] = arg
}
funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type()))
}
t := ir.NewFuncType(base.Pos, nil, funcArgs, nil)
wrapCall_prgen++
sym := typecheck.LookupNum("wrap·", wrapCall_prgen)
fn := typecheck.DeclFunc(sym, t)
args := ir.ParamNames(t.Type())
for i, origArg := range origArgs {
if origArg == nil {
continue
}
args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
}
call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args)
if !isBuiltinCall {
call.SetOp(ir.OCALL)
call.IsDDD = n.IsDDD
}
fn.Body = []ir.Node{call}
typecheck.FinishFuncBody()
typecheck.Func(fn)
typecheck.Stmts(fn.Body)
typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args)
return walkExpr(typecheck.Stmt(call), init)
}
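// Illustrative sketch (assumed): wrapping keeps otherwise-unwalkable calls
// valid after go/defer. Conceptually,
//
//	go print(a, b)
//
// becomes
//
//	func wrap·N(a0 A0, a1 A1) { print(a0, a1) }
//	go wrap·N(a, b)
//
// where A0 and A1 stand for the argument types and N is a fresh sequence
// number; both names here are schematic.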


@@ -1,338 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package walk
import (
"fmt"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
)
// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapexpr.
func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
var init ir.Nodes
c := cheapexpr(n, &init)
if c != n || len(init) != 0 {
base.Fatalf("backingArrayPtrLen not cheap: %v", n)
}
ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
if n.Type().IsString() {
ptr.SetType(types.Types[types.TUINT8].PtrTo())
} else {
ptr.SetType(n.Type().Elem().PtrTo())
}
length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
length.SetType(types.Types[types.TINT])
return ptr, length
}
// updateHasCall checks whether expression n contains any function
// calls and sets the n.HasCall flag if so.
func updateHasCall(n ir.Node) {
if n == nil {
return
}
n.SetHasCall(calcHasCall(n))
}
func calcHasCall(n ir.Node) bool {
if len(n.Init()) != 0 {
// TODO(mdempsky): This seems overly conservative.
return true
}
switch n.Op() {
default:
base.Fatalf("calcHasCall %+v", n)
panic("unreachable")
case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET:
if n.HasCall() {
base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
}
return false
case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return true
case ir.OANDAND, ir.OOROR:
// hard with instrumented code
n := n.(*ir.LogicalExpr)
if base.Flag.Cfg.Instrumenting {
return true
}
return n.X.HasCall() || n.Y.HasCall()
case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
// These ops might panic, make sure they are done
// before we start marshaling args for a call. See issue 16760.
return true
// When using soft-float, these ops might be rewritten to function calls
// so we ensure they are evaluated first.
case ir.OADD, ir.OSUB, ir.OMUL:
n := n.(*ir.BinaryExpr)
if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
return true
}
return n.X.HasCall() || n.Y.HasCall()
case ir.ONEG:
n := n.(*ir.UnaryExpr)
if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
return true
}
return n.X.HasCall()
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
n := n.(*ir.BinaryExpr)
if ssagen.Arch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) {
return true
}
return n.X.HasCall() || n.Y.HasCall()
case ir.OCONV:
n := n.(*ir.ConvExpr)
if ssagen.Arch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) {
return true
}
return n.X.HasCall()
case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE:
n := n.(*ir.BinaryExpr)
return n.X.HasCall() || n.Y.HasCall()
case ir.OAS:
n := n.(*ir.AssignStmt)
return n.X.HasCall() || n.Y != nil && n.Y.HasCall()
case ir.OADDR:
n := n.(*ir.AddrExpr)
return n.X.HasCall()
case ir.OPAREN:
n := n.(*ir.ParenExpr)
return n.X.HasCall()
case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV,
ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW,
ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF,
ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE:
n := n.(*ir.UnaryExpr)
return n.X.HasCall()
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
n := n.(*ir.SelectorExpr)
return n.X.HasCall()
case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR:
return false
// TODO(rsc): These look wrong in various ways but are what calcHasCall has always done.
case ir.OADDSTR:
// TODO(rsc): This used to check left and right, which are not part of OADDSTR.
return false
case ir.OBLOCK:
// TODO(rsc): Surely the block's statements matter.
return false
case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR:
// TODO(rsc): Some conversions are themselves calls, no?
n := n.(*ir.ConvExpr)
return n.X.HasCall()
case ir.ODOTTYPE2:
// TODO(rsc): Shouldn't this be up with ODOTTYPE above?
n := n.(*ir.TypeAssertExpr)
return n.X.HasCall()
case ir.OSLICEHEADER:
// TODO(rsc): What about len and cap?
n := n.(*ir.SliceHeaderExpr)
return n.Ptr.HasCall()
case ir.OAS2DOTTYPE, ir.OAS2FUNC:
// TODO(rsc): Surely we need to check List and Rlist.
return false
}
}
func badtype(op ir.Op, tl, tr *types.Type) {
var s string
if tl != nil {
s += fmt.Sprintf("\n\t%v", tl)
}
if tr != nil {
s += fmt.Sprintf("\n\t%v", tr)
}
// common mistake: *struct and *interface.
if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
s += "\n\t(*struct vs *interface)"
} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
s += "\n\t(*interface vs *struct)"
}
}
base.Errorf("illegal types for operand: %v%s", op, s)
}
// brcom returns !(op).
// For example, brcom(==) is !=.
func brcom(op ir.Op) ir.Op {
switch op {
case ir.OEQ:
return ir.ONE
case ir.ONE:
return ir.OEQ
case ir.OLT:
return ir.OGE
case ir.OGT:
return ir.OLE
case ir.OLE:
return ir.OGT
case ir.OGE:
return ir.OLT
}
base.Fatalf("brcom: no com for %v\n", op)
return op
}
// brrev returns reverse(op).
// For example, Brrev(<) is >.
func brrev(op ir.Op) ir.Op {
switch op {
case ir.OEQ:
return ir.OEQ
case ir.ONE:
return ir.ONE
case ir.OLT:
return ir.OGT
case ir.OGT:
return ir.OLT
case ir.OLE:
return ir.OGE
case ir.OGE:
return ir.OLE
}
base.Fatalf("brrev: no rev for %v\n", op)
return op
}
// return side effect-free n, appending side effects to init.
// result is assignable if n is.
func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return nil
}
if len(n.Init()) != 0 {
walkstmtlist(n.Init())
init.Append(n.PtrInit().Take()...)
}
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
return n
case ir.OLEN, ir.OCAP:
n := n.(*ir.UnaryExpr)
l := safeexpr(n.X, init)
if l == n.X {
return n
}
a := ir.Copy(n).(*ir.UnaryExpr)
a.X = l
return walkexpr(typecheck.Expr(a), init)
case ir.ODOT, ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
l := safeexpr(n.X, init)
if l == n.X {
return n
}
a := ir.Copy(n).(*ir.SelectorExpr)
a.X = l
return walkexpr(typecheck.Expr(a), init)
case ir.ODEREF:
n := n.(*ir.StarExpr)
l := safeexpr(n.X, init)
if l == n.X {
return n
}
a := ir.Copy(n).(*ir.StarExpr)
a.X = l
return walkexpr(typecheck.Expr(a), init)
case ir.OINDEX, ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
l := safeexpr(n.X, init)
r := safeexpr(n.Index, init)
if l == n.X && r == n.Index {
return n
}
a := ir.Copy(n).(*ir.IndexExpr)
a.X = l
a.Index = r
return walkexpr(typecheck.Expr(a), init)
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
if isStaticCompositeLiteral(n) {
return n
}
}
// make a copy; must not be used as an lvalue
if ir.IsAssignable(n) {
base.Fatalf("missing lvalue case in safeexpr: %v", n)
}
return cheapexpr(n, init)
}
func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
l := typecheck.Temp(t)
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n))
return l
}
// return side-effect free and cheap n, appending side effects to init.
// result may not be assignable.
func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
}
return copyexpr(n, n.Type(), init)
}
// itabType loads the _type field from a runtime.itab struct.
func itabType(itab ir.Node) ir.Node {
typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil)
typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
typ.SetTypecheck(1)
typ.Offset = int64(types.PtrSize) // offset of _type in runtime.itab
typ.SetBounded(true) // guaranteed not to fault
return typ
}
// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !isdirectiface(t).
func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
if t.IsInterface() {
base.Fatalf("ifaceData interface: %v", t)
}
ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
if types.IsDirectIface(t) {
ptr.SetType(t)
ptr.SetTypecheck(1)
return ptr
}
ptr.SetType(types.NewPtr(t))
ptr.SetTypecheck(1)
ind := ir.NewStarExpr(pos, ptr)
ind.SetType(t)
ind.SetTypecheck(1)
ind.SetBounded(true)
return ind
}


@@ -16,23 +16,23 @@ import (
"cmd/internal/src"
)
// walkswitch walks a switch statement.
func walkswitch(sw *ir.SwitchStmt) {
// walkSwitch walks a switch statement.
func walkSwitch(sw *ir.SwitchStmt) {
// Guard against double walk, see #25776.
if len(sw.Cases) == 0 && len(sw.Compiled) > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
}
if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW {
walkTypeSwitch(sw)
walkSwitchType(sw)
} else {
walkExprSwitch(sw)
walkSwitchExpr(sw)
}
}
// walkExprSwitch generates an AST implementing sw. sw is an
// walkSwitchExpr generates an AST implementing sw. sw is an
// expression switch.
func walkExprSwitch(sw *ir.SwitchStmt) {
func walkSwitchExpr(sw *ir.SwitchStmt) {
lno := ir.SetPos(sw)
cond := sw.Tag
@@ -57,9 +57,9 @@ func walkExprSwitch(sw *ir.SwitchStmt) {
cond.SetOp(ir.OBYTES2STRTMP)
}
cond = walkexpr(cond, sw.PtrInit())
cond = walkExpr(cond, sw.PtrInit())
if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
cond = copyexpr(cond, cond.Type(), &sw.Compiled)
cond = copyExpr(cond, cond.Type(), &sw.Compiled)
}
base.Pos = lno
@@ -107,7 +107,7 @@ func walkExprSwitch(sw *ir.SwitchStmt) {
s.Emit(&sw.Compiled)
sw.Compiled.Append(defaultGoto)
sw.Compiled.Append(body.Take()...)
walkstmtlist(sw.Compiled)
walkStmtList(sw.Compiled)
}
// An exprSwitch walks an expression switch.
@@ -287,15 +287,15 @@ func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
}
// walkTypeSwitch generates an AST that implements sw, where sw is a
// walkSwitchType generates an AST that implements sw, where sw is a
// type switch.
func walkTypeSwitch(sw *ir.SwitchStmt) {
func walkSwitchType(sw *ir.SwitchStmt) {
var s typeSwitch
s.facename = sw.Tag.(*ir.TypeSwitchGuard).X
sw.Tag = nil
s.facename = walkexpr(s.facename, sw.PtrInit())
s.facename = copyexpr(s.facename, s.facename.Type(), &sw.Compiled)
s.facename = walkExpr(s.facename, sw.PtrInit())
s.facename = copyExpr(s.facename, s.facename.Type(), &sw.Compiled)
s.okname = typecheck.Temp(types.Types[types.TBOOL])
// Get interface descriptor word.
@@ -327,7 +327,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) {
dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime.itab
}
dotHash.SetBounded(true) // guaranteed not to fault
s.hashname = copyexpr(dotHash, dotHash.Type(), &sw.Compiled)
s.hashname = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
var defaultGoto, nilGoto ir.Node
@@ -409,7 +409,7 @@ func walkTypeSwitch(sw *ir.SwitchStmt) {
sw.Compiled.Append(defaultGoto)
sw.Compiled.Append(body.Take()...)
walkstmtlist(sw.Compiled)
walkStmtList(sw.Compiled)
}
// A typeSwitch walks a type switch.

File diff suppressed because it is too large.