cmd/compile,runtime: open code unsafe.Slice
This avoids the heavy runtime call overhead and gives the compiler a
chance to optimize the bounds checks.

With this optimization, changing runtime/stack.go to use unsafe.Slice
no longer negatively impacts stack copying performance:

name                   old time/op    new time/op    delta
StackCopyWithStkobj-8  16.3ms ± 6%    16.5ms ± 5%    ~    (p=0.382 n=8+8)

name                   old alloc/op   new alloc/op   delta
StackCopyWithStkobj-8  17.0B ± 0%     17.0B ± 0%     ~    (all equal)

name                   old allocs/op  new allocs/op  delta
StackCopyWithStkobj-8  1.00 ± 0%      1.00 ± 0%      ~    (all equal)

Fixes #48798

Change-Id: I731a9a4abd6dd6846f44eece7f86025b7bb1141b
Reviewed-on: https://go-review.googlesource.com/c/go/+/362934
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Reviewed-by: Keith Randall <khr@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Cuong Manh Le <cuong.manhle.vn@gmail.com>
commit 579902d0b1
parent ccb798741b
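For context, unsafe.Slice(ptr, len) (Go 1.17+) builds a slice whose backing
array starts at ptr, with both length and capacity equal to len. The checks
being open coded by this CL reject a negative length, an element-size*len
product that overflows the address space past ptr, and a nil pointer with a
nonzero length. A minimal usage sketch (illustrative only, not part of this CL):

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		arr := [4]int{1, 2, 3, 4}

		// Build a []int of length and capacity 3 backed by arr.
		// After this CL, the length/overflow/nil checks are emitted
		// inline by the compiler instead of via a call to
		// runtime.unsafeslice.
		s := unsafe.Slice(&arr[0], 3)
		fmt.Println(s, len(s), cap(s)) // [1 2 3] 3 3
	}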
@@ -3921,6 +3921,7 @@ func InitTables() {
 			return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
 		},
 		sys.AMD64, sys.I386, sys.MIPS64, sys.RISCV64)
+	alias("runtime", "mulUintptr", "runtime/internal/math", "MulUintptr", all...)
 	add("runtime", "KeepAlive",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
 			data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
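The alias above lets calls to the new runtime.mulUintptr wrapper lower to the
same multiply-with-overflow SSA op as runtime/internal/math.MulUintptr on the
listed architectures. The operation itself is an ordinary overflow-checked
multiply; a standalone sketch of the idea (not the runtime's exact source):

	package main

	import (
		"fmt"
		"math"
	)

	// mulCheck multiplies a and b and reports whether the product
	// overflows uint64, mirroring the contract of mulUintptr.
	func mulCheck(a, b uint64) (uint64, bool) {
		if a == 0 {
			return 0, false
		}
		overflow := b > math.MaxUint64/a
		return a * b, overflow
	}

	func main() {
		fmt.Println(mulCheck(1<<32, 1<<31)) // 9223372036854775808 false
		fmt.Println(mulCheck(1<<32, 1<<33)) // 0 true
	}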
@@ -134,9 +134,10 @@ var runtimeDecls = [...]struct {
 	{"makeslice64", funcTag, 114},
 	{"makeslicecopy", funcTag, 115},
 	{"growslice", funcTag, 117},
-	{"unsafeslice", funcTag, 118},
-	{"unsafeslice64", funcTag, 119},
-	{"unsafeslicecheckptr", funcTag, 119},
+	{"unsafeslicecheckptr", funcTag, 118},
+	{"panicunsafeslicelen", funcTag, 9},
+	{"panicunsafeslicenilptr", funcTag, 9},
+	{"mulUintptr", funcTag, 119},
 	{"memmove", funcTag, 120},
 	{"memclrNoHeapPointers", funcTag, 121},
 	{"memclrHasPointers", funcTag, 121},
@@ -346,8 +347,8 @@ func runtimeTypes() []*types.Type {
 	typs[115] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
 	typs[116] = types.NewSlice(typs[2])
 	typs[117] = newSig(params(typs[1], typs[116], typs[15]), params(typs[116]))
-	typs[118] = newSig(params(typs[1], typs[7], typs[15]), nil)
-	typs[119] = newSig(params(typs[1], typs[7], typs[22]), nil)
+	typs[118] = newSig(params(typs[1], typs[7], typs[22]), nil)
+	typs[119] = newSig(params(typs[5], typs[5]), params(typs[5], typs[6]))
 	typs[120] = newSig(params(typs[3], typs[3], typs[5]), nil)
 	typs[121] = newSig(params(typs[7], typs[5]), nil)
 	typs[122] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
@@ -180,9 +180,11 @@ func makeslice(typ *byte, len int, cap int) unsafe.Pointer
 func makeslice64(typ *byte, len int64, cap int64) unsafe.Pointer
 func makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
 func growslice(typ *byte, old []any, cap int) (ary []any)
-func unsafeslice(typ *byte, ptr unsafe.Pointer, len int)
-func unsafeslice64(typ *byte, ptr unsafe.Pointer, len int64)
 func unsafeslicecheckptr(typ *byte, ptr unsafe.Pointer, len int64)
+func panicunsafeslicelen()
+func panicunsafeslicenilptr()
 
+func mulUintptr(x, y uintptr) (uintptr, bool)
+
 func memmove(to *any, frm *any, length uintptr)
 func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
@@ -645,28 +645,67 @@ func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
 func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
 	ptr := safeExpr(n.X, init)
 	len := safeExpr(n.Y, init)
+	sliceType := n.Type()
 
-	fnname := "unsafeslice64"
 	lenType := types.Types[types.TINT64]
+	unsafePtr := typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR])
 
+	// If checkptr enabled, call runtime.unsafeslicecheckptr to check ptr and len.
+	// for simplicity, unsafeslicecheckptr always uses int64.
 	// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
 	// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
 	// will be handled by the negative range checks in unsafeslice during runtime.
 	if ir.ShouldCheckPtr(ir.CurFunc, 1) {
-		fnname = "unsafeslicecheckptr"
-		// for simplicity, unsafeslicecheckptr always uses int64
-	} else if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
-		fnname = "unsafeslice"
-		lenType = types.Types[types.TINT]
+		fnname := "unsafeslicecheckptr"
+		fn := typecheck.LookupRuntime(fnname)
+		init.Append(mkcall1(fn, nil, init, reflectdata.TypePtr(sliceType.Elem()), unsafePtr, typecheck.Conv(len, lenType)))
+	} else {
+		// Otherwise, open code unsafe.Slice to prevent runtime call overhead.
+		// Keep this code in sync with runtime.unsafeslice{,64}
+		if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
+			lenType = types.Types[types.TINT]
+		} else {
+			// len64 := int64(len)
+			// if int64(int(len64)) != len64 {
+			//     panicunsafeslicelen()
+			// }
+			len64 := typecheck.Conv(len, lenType)
+			nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+			nif.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, typecheck.Conv(typecheck.Conv(len64, types.Types[types.TINT]), lenType), len64)
+			nif.Body.Append(mkcall("panicunsafeslicelen", nil, &nif.Body))
+			appendWalkStmt(init, nif)
+		}
+
+		// if len < 0 { panicunsafeslicelen() }
+		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, typecheck.Conv(len, lenType), ir.NewInt(0))
+		nif.Body.Append(mkcall("panicunsafeslicelen", nil, &nif.Body))
+		appendWalkStmt(init, nif)
+
+		// mem, overflow := runtime.mulUintptr(et.size, len)
+		mem := typecheck.Temp(types.Types[types.TUINTPTR])
+		overflow := typecheck.Temp(types.Types[types.TBOOL])
+		fn := typecheck.LookupRuntime("mulUintptr")
+		call := mkcall1(fn, fn.Type().Results(), init, ir.NewInt(sliceType.Elem().Size()), typecheck.Conv(typecheck.Conv(len, lenType), types.Types[types.TUINTPTR]))
+		appendWalkStmt(init, ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{mem, overflow}, []ir.Node{call}))
+
+		// if overflow || mem > -uintptr(ptr) {
+		//     if ptr == nil {
+		//         panicunsafesliceptrnil()
+		//     }
+		//     panicunsafeslicelen()
+		// }
+		nif = ir.NewIfStmt(base.Pos, nil, nil, nil)
+		memCond := ir.NewBinaryExpr(base.Pos, ir.OGT, mem, ir.NewUnaryExpr(base.Pos, ir.ONEG, typecheck.Conv(unsafePtr, types.Types[types.TUINTPTR])))
+		nif.Cond = ir.NewLogicalExpr(base.Pos, ir.OOROR, overflow, memCond)
+		nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		nifPtr.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil())
+		nifPtr.Body.Append(mkcall("panicunsafeslicenilptr", nil, &nifPtr.Body))
+		nif.Body.Append(nifPtr, mkcall("panicunsafeslicelen", nil, &nif.Body))
+		appendWalkStmt(init, nif)
 	}
 
-	t := n.Type()
-
-	// Call runtime.unsafeslice{,64,checkptr} to check ptr and len.
-	fn := typecheck.LookupRuntime(fnname)
-	init.Append(mkcall1(fn, nil, init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]), typecheck.Conv(len, lenType)))
-
-	h := ir.NewSliceHeaderExpr(n.Pos(), t,
+	h := ir.NewSliceHeaderExpr(n.Pos(), sliceType,
 		typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
 		typecheck.Conv(len, types.Types[types.TINT]),
 		typecheck.Conv(len, types.Types[types.TINT]))
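In source terms, the IR built above expands unsafe.Slice(ptr, n) for an
int-sized n into roughly the following checks. checkedSlice is a hypothetical
helper written for illustration; it is not literal compiler output:

	package main

	import (
		"fmt"
		"math/bits"
		"unsafe"
	)

	// checkedSlice mirrors the open-coded checks: n must be
	// non-negative, elemSize*n must neither overflow nor run past
	// the top of the address space from ptr, and ptr must not be
	// nil when n is nonzero.
	func checkedSlice(ptr *int, n int) []int {
		if n < 0 {
			panic("unsafe.Slice: len out of range")
		}
		elemSize := unsafe.Sizeof(int(0))
		// mem, overflow := mulUintptr(elemSize, uintptr(n))
		hi, mem := bits.Mul(uint(elemSize), uint(n))
		if hi != 0 || uintptr(mem) > -uintptr(unsafe.Pointer(ptr)) {
			if ptr == nil {
				panic("unsafe.Slice: ptr is nil and len is not zero")
			}
			panic("unsafe.Slice: len out of range")
		}
		return unsafe.Slice(ptr, n)
	}

	func main() {
		arr := [3]int{10, 20, 30}
		fmt.Println(checkedSlice(&arr[0], 2)) // [10 20]
	}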
@@ -117,6 +117,13 @@ func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
 	return makeslice(et, len, cap)
 }
 
+// This is a wrapper over runtime/internal/math.MulUintptr,
+// so the compiler can recognize and treat it as an intrinsic.
+func mulUintptr(a, b uintptr) (uintptr, bool) {
+	return math.MulUintptr(a, b)
+}
+
+// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
 func unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
 	if len < 0 {
 		panicunsafeslicelen()
@@ -125,12 +132,13 @@ func unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
 	mem, overflow := math.MulUintptr(et.size, uintptr(len))
 	if overflow || mem > -uintptr(ptr) {
 		if ptr == nil {
-			panic(errorString("unsafe.Slice: ptr is nil and len is not zero"))
+			panicunsafeslicenilptr()
 		}
 		panicunsafeslicelen()
 	}
 }
 
+// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
 func unsafeslice64(et *_type, ptr unsafe.Pointer, len64 int64) {
 	len := int(len64)
 	if int64(len) != len64 {
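unsafeslice64 first narrows its int64 length to int and rejects values that do
not survive the round trip, then reuses the int-based checks; the open-coded
path above does the same via the ir.ONE comparison. The narrowing test in
isolation (an illustrative helper, not runtime code):

	package main

	import "fmt"

	// fitsInInt reports whether len64 survives the int64 -> int
	// round trip, the same check unsafeslice64 performs before
	// delegating to the int-based checks.
	func fitsInInt(len64 int64) bool {
		return int64(int(len64)) == len64
	}

	func main() {
		fmt.Println(fitsInInt(1 << 20))        // true
		fmt.Println(fitsInInt(int64(1) << 40)) // true on 64-bit int, false on 32-bit
	}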
@@ -153,6 +161,10 @@ func panicunsafeslicelen() {
 	panic(errorString("unsafe.Slice: len out of range"))
 }
 
+func panicunsafeslicenilptr() {
+	panic(errorString("unsafe.Slice: ptr is nil and len is not zero"))
+}
+
 // growslice handles slice growth during append.
 // It is passed the slice element type, the old slice, and the desired new minimum capacity,
 // and it returns a new slice with at least that capacity, with the old data
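The new panicunsafeslicenilptr gives the nil-pointer failure its own runtime
entry point that the open-coded path can call directly. Its effect is
observable from user code (a quick demonstration, assuming Go 1.17+):

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		defer func() {
			// Prints: recovered: runtime error: unsafe.Slice: ptr is nil and len is not zero
			fmt.Println("recovered:", recover())
		}()
		var p *byte
		_ = unsafe.Slice(p, 1) // nil ptr with nonzero len panics
	}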