
[dev.regabi] reflect: support for register ABI on amd64 for reflect.(Value).Call

This change adds support for the new register ABI on amd64 to
reflect.(Value).Call. If internal/abi's register counts are non-zero,
reflect will try to set up arguments in registers on the Call path.

Note that with zero registers available the register ABI degenerates to
ABI0, so this should keep working as it did before.
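
To make that fallback concrete, here is a minimal sketch (not code from
this change; names are illustrative) of why zero register counts reproduce
the old stack-only layout:

package main

import "fmt"

const intArgRegs = 0 // ABI0 configuration: no integer argument registers

type seq struct {
	iregs      int
	stackBytes uintptr
}

// tryReg mirrors the shape of the register-availability check in
// reflect's assignIntN.
func (s *seq) tryReg(n int) bool {
	if s.iregs+n > intArgRegs {
		return false // always taken for n > 0 when intArgRegs == 0
	}
	s.iregs += n
	return true
}

func (s *seq) addArg(size uintptr) {
	if !s.tryReg(1) {
		s.stackBytes += size // stack fallback: the pre-existing behavior
	}
}

func main() {
	var s seq
	s.addArg(8)
	s.addArg(8)
	fmt.Println(s.iregs, s.stackBytes) // 0 16: everything is stack-assigned
}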

This change does not add any tests for the register ABI case because
there's no way to do so at the moment.

For #40724.

Change-Id: I8aa089a5aa5a31b72e56b3d9388dd3f82203985b
Reviewed-on: https://go-review.googlesource.com/c/go/+/272568
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Than McIntosh <thanm@google.com>
Author: Michael Anthony Knyszek (2020-10-22 16:29:04 +00:00); committed by Michael Knyszek
parent b81efb7ec4
commit e0215315f5

22 changed files with 951 additions and 242 deletions

src/go/build/deps_test.go

@@ -71,11 +71,15 @@ var depsRules = `
 	# No dependencies allowed for any of these packages.
 	NONE
 	< container/list, container/ring,
-	  internal/abi, internal/cfg, internal/cpu,
+	  internal/cfg, internal/cpu,
 	  internal/goversion, internal/nettrace,
 	  unicode/utf8, unicode/utf16, unicode,
 	  unsafe;
 
+	# These packages depend only on unsafe.
+	unsafe
+	< internal/abi;
+
 	# RUNTIME is the core runtime group of packages, all of them very light-weight.
 	internal/abi, internal/cpu, unsafe
 	< internal/bytealg

src/internal/abi/abi.go

@@ -4,9 +4,50 @@
 package abi
 
+import "unsafe"
+
 // RegArgs is a struct that has space for each argument
 // and return value register on the current architecture.
+//
+// Assembly code knows the layout of the first two fields
+// of RegArgs.
+//
+// RegArgs also contains additional space to hold pointers
+// when it may not be safe to keep them only in the integer
+// register space otherwise.
 type RegArgs struct {
-	Ints   [IntArgRegs]uintptr
-	Floats [FloatArgRegs]uint64
+	Ints   [IntArgRegs]uintptr  // untyped integer registers
+	Floats [FloatArgRegs]uint64 // untyped float registers
+
+	// Fields above this point are known to assembly.
+
+	// Ptrs is a space that duplicates Ints but with pointer type,
+	// used to make pointers passed or returned in registers
+	// visible to the GC by making the type unsafe.Pointer.
+	Ptrs [IntArgRegs]unsafe.Pointer
+
+	// ReturnIsPtr is a bitmap that indicates which registers
+	// contain or will contain pointers on the return path from
+	// a reflectcall. The i'th bit indicates whether the i'th
+	// register contains or will contain a valid Go pointer.
+	ReturnIsPtr IntArgRegBitmap
+}
+
+// IntArgRegBitmap is a bitmap large enough to hold one bit per
+// integer argument/return register.
+type IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8
+
+// Set sets the i'th bit of the bitmap to 1.
+func (b *IntArgRegBitmap) Set(i int) {
+	b[i/8] |= uint8(1) << (i % 8)
+}
+
+// Get returns whether the i'th bit of the bitmap is set.
+//
+// nosplit because it's called in extremely sensitive contexts, like
+// on the reflectcall return path.
+//
+//go:nosplit
+func (b *IntArgRegBitmap) Get(i int) bool {
+	return b[i/8]&(uint8(1)<<(i%8)) != 0
 }
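
As a quick illustration (not part of the diff) of how these helpers
compose, here is a self-contained sketch with IntArgRegs pinned to 9,
the amd64 value this series targets; the real constant lives elsewhere
in internal/abi:

package main

import "fmt"

const IntArgRegs = 9

type IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8

func (b *IntArgRegBitmap) Set(i int) {
	b[i/8] |= uint8(1) << (i % 8)
}

func (b *IntArgRegBitmap) Get(i int) bool {
	return b[i/8]&(uint8(1)<<(i%8)) != 0
}

func main() {
	var ret IntArgRegBitmap
	ret.Set(0) // e.g. a pointer returned in the first integer register
	ret.Set(2)
	for i := 0; i < IntArgRegs; i++ {
		fmt.Printf("reg %d holds pointer: %v\n", i, ret.Get(i))
	}
}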

src/reflect/abi.go (new file, 403 lines)

@@ -0,0 +1,403 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package reflect
import (
"internal/abi"
"unsafe"
)
// abiStep represents an ABI "instruction." Each instruction
// describes one part of how to translate between a Go value
// in memory and a call frame.
type abiStep struct {
kind abiStepKind
// offset and size together describe a part of a Go value
// in memory.
offset uintptr
size uintptr // size in bytes of the part
// These fields describe the ABI side of the translation.
stkOff uintptr // stack offset, used if kind == abiStepStack
ireg int // integer register index, used if kind == abiStepIntReg or kind == abiStepPointer
freg int // FP register index, used if kind == abiStepFloatReg
}
// abiStepKind is the "op-code" for an abiStep instruction.
type abiStepKind int
const (
abiStepBad abiStepKind = iota
abiStepStack // copy to/from stack
abiStepIntReg // copy to/from integer register
abiStepPointer // copy pointer to/from integer register
abiStepFloatReg // copy to/from FP register
)
// abiSeq represents a sequence of ABI instructions for copying
// from a series of reflect.Values to a call frame (for call arguments)
// or vice-versa (for call results).
//
// An abiSeq should be populated by calling its addArg method.
type abiSeq struct {
// steps is the set of instructions.
//
// The instructions are grouped together by whole arguments,
// with the starting index for the instructions
// of the i'th Go value available in valueStart.
//
// For instance, if this abiSeq represents 3 arguments
// passed to a function, then the 2nd argument's steps
// begin at steps[valueStart[1]].
//
// Because reflect accepts Go arguments in distinct
// Values and each Value is stored separately, each abiStep
// that begins a new argument will have its offset
// field == 0.
steps []abiStep
valueStart []int
stackBytes uintptr // stack space used
iregs, fregs int // registers used
}
func (a *abiSeq) dump() {
for i, p := range a.steps {
println("part", i, p.kind, p.offset, p.size, p.stkOff, p.ireg, p.freg)
}
print("values ")
for _, i := range a.valueStart {
print(i, " ")
}
println()
println("stack", a.stackBytes)
println("iregs", a.iregs)
println("fregs", a.fregs)
}
// stepsForValue returns the ABI instructions for translating
// the i'th Go argument or return value represented by this
// abiSeq to the Go ABI.
func (a *abiSeq) stepsForValue(i int) []abiStep {
s := a.valueStart[i]
var e int
if i == len(a.valueStart)-1 {
e = len(a.steps)
} else {
e = a.valueStart[i+1]
}
return a.steps[s:e]
}
// addArg extends the abiSeq with a new Go value of type t.
//
// If the value was stack-assigned, returns the single
// abiStep describing that translation, and nil otherwise.
func (a *abiSeq) addArg(t *rtype) *abiStep {
pStart := len(a.steps)
a.valueStart = append(a.valueStart, pStart)
if !a.regAssign(t, 0) {
a.steps = a.steps[:pStart]
a.stackAssign(t.size, uintptr(t.align))
return &a.steps[len(a.steps)-1]
}
return nil
}
// addRcvr extends the abiSeq with a new method call
// receiver according to the interface calling convention.
//
// If the receiver was stack-assigned, returns the single
// abiStep describing that translation, and nil otherwise.
// Returns true if the receiver is a pointer.
func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
// The receiver is always one word.
a.valueStart = append(a.valueStart, len(a.steps))
var ok, ptr bool
if ifaceIndir(rcvr) || rcvr.pointers() {
ok = a.assignIntN(0, ptrSize, 1, 0b1)
ptr = true
} else {
// TODO(mknyszek): Is this case even possible?
// The interface data word never contains a non-pointer
// value. This case was copied over from older code
// in the reflect package which only conditionally added
// a pointer bit to the reflect.(Value).Call stack frame's
// GC bitmap.
ok = a.assignIntN(0, ptrSize, 1, 0b0)
ptr = false
}
if !ok {
a.stackAssign(ptrSize, ptrSize)
return &a.steps[len(a.steps)-1], ptr
}
return nil, ptr
}
// regAssign attempts to reserve argument registers for a value of
// type t, stored at some offset.
//
// It returns whether or not the assignment succeeded, but
// leaves any changes it made to a.steps behind, so the caller
// must undo that work by adjusting a.steps if it fails.
//
// This method along with the assign* methods represent the
// complete register-assignment algorithm for the Go ABI.
func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
switch t.Kind() {
case UnsafePointer, Ptr, Chan, Map, Func:
return a.assignIntN(offset, t.size, 1, 0b1)
case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
return a.assignIntN(offset, t.size, 1, 0b0)
case Int64, Uint64:
switch ptrSize {
case 4:
return a.assignIntN(offset, 4, 2, 0b0)
case 8:
return a.assignIntN(offset, 8, 1, 0b0)
}
case Float32, Float64:
return a.assignFloatN(offset, t.size, 1)
case Complex64:
return a.assignFloatN(offset, 4, 2)
case Complex128:
return a.assignFloatN(offset, 8, 2)
case String:
return a.assignIntN(offset, ptrSize, 2, 0b01)
case Interface:
return a.assignIntN(offset, ptrSize, 2, 0b10)
case Slice:
return a.assignIntN(offset, ptrSize, 3, 0b001)
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
switch tt.len {
case 0:
// There's nothing to assign, so don't modify
// a.steps but succeed so the caller doesn't
// try to stack-assign this value.
return true
case 1:
return a.regAssign(tt.elem, offset)
default:
return false
}
case Struct:
if t.size == 0 {
// There's nothing to assign, so don't modify
// a.steps but succeed so the caller doesn't
// try to stack-assign this value.
return true
}
st := (*structType)(unsafe.Pointer(t))
for i := range st.fields {
f := &st.fields[i]
if f.typ.Size() == 0 {
// Ignore zero-sized fields.
continue
}
if !a.regAssign(f.typ, offset+f.offset()) {
return false
}
}
return true
default:
print("t.Kind == ", t.Kind(), "\n")
panic("unknown type kind")
}
panic("unhandled register assignment path")
}
// assignIntN assigns n values to registers, each "size" bytes large,
// from the data at [offset, offset+n*size) in memory. Each value at
// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
// next n integer registers.
//
// Bit i in ptrMap indicates whether the i'th value is a pointer.
// n must be <= 8.
//
// Returns whether assignment succeeded.
func (a *abiSeq) assignIntN(offset, size uintptr, n int, ptrMap uint8) bool {
if n > 8 || n < 0 {
panic("invalid n")
}
if ptrMap != 0 && size != ptrSize {
panic("non-empty pointer map passed for non-pointer-size values")
}
if a.iregs+n > abi.IntArgRegs {
return false
}
for i := 0; i < n; i++ {
kind := abiStepIntReg
if ptrMap&(uint8(1)<<i) != 0 {
kind = abiStepPointer
}
a.steps = append(a.steps, abiStep{
kind: kind,
offset: offset + uintptr(i)*size,
size: size,
ireg: a.iregs,
})
a.iregs++
}
return true
}
// assignFloatN assigns n values to registers, each "size" bytes large,
// from the data at [offset, offset+n*size) in memory. Each value at
// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
// next n floating-point registers.
//
// Returns whether assignment succeeded.
func (a *abiSeq) assignFloatN(offset, size uintptr, n int) bool {
if n < 0 {
panic("invalid n")
}
if a.fregs+n > abi.FloatArgRegs || abi.EffectiveFloatRegSize < size {
return false
}
for i := 0; i < n; i++ {
a.steps = append(a.steps, abiStep{
kind: abiStepFloatReg,
offset: offset + uintptr(i)*size,
size: size,
freg: a.fregs,
})
a.fregs++
}
return true
}
// stackAssign reserves stack space for one value that is "size" bytes
// large with alignment "alignment".
//
// Should not be called directly; use addArg instead.
func (a *abiSeq) stackAssign(size, alignment uintptr) {
a.stackBytes = align(a.stackBytes, alignment)
a.steps = append(a.steps, abiStep{
kind: abiStepStack,
offset: 0, // Only used for whole arguments, so the memory offset is 0.
size: size,
stkOff: a.stackBytes,
})
a.stackBytes += size
}
// abiDesc describes the ABI for a function or method.
type abiDesc struct {
// call and ret represent the translation steps for
// the call and return paths of a Go function.
call, ret abiSeq
// These fields describe the stack space allocated
// for the call. stackCallArgsSize is the amount of space
// reserved for arguments but not return values. retOffset
// is the offset at which return values begin, and
// spill is the size in bytes of additional space reserved
// to spill argument registers into in case of preemption in
// reflectcall's stack frame.
stackCallArgsSize, retOffset, spill uintptr
// stackPtrs is a bitmap that indicates whether
// each word in the ABI stack space (stack-assigned
// args + return values) is a pointer. Used
// as the heap pointer bitmap for stack space
// passed to reflectcall.
stackPtrs *bitVector
// outRegPtrs is a bitmap whose i'th bit indicates
// whether the i'th integer result register contains
// a pointer. Used by reflectcall to make result
// pointers visible to the GC.
outRegPtrs abi.IntArgRegBitmap
}
func (a *abiDesc) dump() {
println("ABI")
println("call")
a.call.dump()
println("ret")
a.ret.dump()
println("stackCallArgsSize", a.stackCallArgsSize)
println("retOffset", a.retOffset)
println("spill", a.spill)
}
func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
// We need to add space for this argument to
// the frame so that it can spill args into it.
//
// The size of this space is just the sum of the sizes
// of each register-allocated type.
//
// TODO(mknyszek): Remove this when we no longer have
// caller reserved spill space.
spillInt := uintptr(0)
spillFloat := uintptr(0)
// Compute gc program & stack bitmap for stack arguments
stackPtrs := new(bitVector)
// Compute abiSeq for input parameters.
var in abiSeq
if rcvr != nil {
stkStep, isPtr := in.addRcvr(rcvr)
if stkStep != nil {
if isPtr {
stackPtrs.append(1)
} else {
stackPtrs.append(0)
}
} else {
spillInt += ptrSize
}
}
for _, arg := range t.in() {
i, f := in.iregs, in.fregs
stkStep := in.addArg(arg)
if stkStep != nil {
addTypeBits(stackPtrs, stkStep.stkOff, arg)
} else {
i, f = in.iregs-i, in.fregs-f
spillInt += uintptr(i) * ptrSize
spillFloat += uintptr(f) * abi.EffectiveFloatRegSize
}
}
spill := align(spillInt+spillFloat, ptrSize)
// From the input parameters alone, we now know
// the stackCallArgsSize and retOffset.
stackCallArgsSize := in.stackBytes
retOffset := align(in.stackBytes, ptrSize)
// Compute the stack frame pointer bitmap and register
// pointer bitmap for return values.
outRegPtrs := abi.IntArgRegBitmap{}
// Compute abiSeq for output parameters.
var out abiSeq
// Stack-assigned return values do not share
// space with arguments like they do with registers,
// so we need to inject a stack offset here.
// Fake it by artificially extending stackBytes by
// the return offset.
out.stackBytes = retOffset
for i, res := range t.out() {
stkStep := out.addArg(res)
if stkStep != nil {
addTypeBits(stackPtrs, stkStep.stkOff, res)
} else {
for _, st := range out.stepsForValue(i) {
if st.kind == abiStepPointer {
outRegPtrs.Set(st.ireg)
}
}
}
}
// Undo the faking from earlier so that stackBytes
// is accurate.
out.stackBytes -= retOffset
return abiDesc{in, out, stackCallArgsSize, retOffset, spill, stackPtrs, outRegPtrs}
}
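
A worked example may help here (a sketch, not part of the diff). On a
hypothetical machine with 2 integer and 2 floating-point argument
registers, addArg lays out func(s string, f float64, v [2]int64) as:

	s string   -> assignIntN(0, ptrSize, 2, 0b01) succeeds: step 0 is
	              {abiStepPointer, ireg: 0} for s's data pointer, step 1
	              is {abiStepIntReg, ireg: 1} for s's length.
	f float64  -> assignFloatN(0, 8, 1) succeeds: step 2 is
	              {abiStepFloatReg, freg: 0}.
	v [2]int64 -> arrays of length > 1 never go in registers, so
	              regAssign fails, the partial steps are rolled back,
	              and stackAssign reserves 16 bytes: step 3 is
	              {abiStepStack, stkOff: 0, size: 16}.

Afterwards iregs == 2, fregs == 1, stackBytes == 16, and valueStart ==
[0, 2, 3], so stepsForValue(i) slices steps at exactly those boundaries.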

src/reflect/export_test.go

@@ -23,15 +23,17 @@ const PtrSize = ptrSize
 func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack []byte, gc []byte, ptrs bool) {
 	var ft *rtype
-	var s *bitVector
+	var abi abiDesc
 	if rcvr != nil {
-		ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype))
+		ft, _, abi = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype))
 	} else {
-		ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil)
+		ft, _, abi = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil)
 	}
+	argSize = abi.stackCallArgsSize
+	retOffset = abi.retOffset
 	frametype = ft
-	for i := uint32(0); i < s.n; i++ {
-		stack = append(stack, s.data[i/8]>>(i%8)&1)
+	for i := uint32(0); i < abi.stackPtrs.n; i++ {
+		stack = append(stack, abi.stackPtrs.data[i/8]>>(i%8)&1)
 	}
 	if ft.kind&kindGCProg != 0 {
 		panic("can't handle gc programs")
src/reflect/makefunc.go

@@ -60,9 +60,9 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
 	code := **(**uintptr)(unsafe.Pointer(&dummy))
 
 	// makeFuncImpl contains a stack map for use by the runtime
-	_, argLen, _, stack, _ := funcLayout(ftyp, nil)
+	_, _, abi := funcLayout(ftyp, nil)
 
-	impl := &makeFuncImpl{code: code, stack: stack, argLen: argLen, ftyp: ftyp, fn: fn}
+	impl := &makeFuncImpl{code: code, stack: abi.stackPtrs, argLen: abi.stackCallArgsSize, ftyp: ftyp, fn: fn}
 
 	return Value{t, unsafe.Pointer(impl), flag(Func)}
 }
@@ -112,12 +112,12 @@ func makeMethodValue(op string, v Value) Value {
 	code := **(**uintptr)(unsafe.Pointer(&dummy))
 
 	// methodValue contains a stack map for use by the runtime
-	_, argLen, _, stack, _ := funcLayout(ftyp, nil)
+	_, _, abi := funcLayout(ftyp, nil)
 	fv := &methodValue{
 		fn:     code,
-		stack:  stack,
-		argLen: argLen,
+		stack:  abi.stackPtrs,
+		argLen: abi.stackCallArgsSize,
 		method: int(v.flag) >> flagMethodShift,
 		rcvr:   rcvr,
 	}

src/reflect/type.go

@@ -2984,21 +2984,20 @@ type layoutKey struct {
 type layoutType struct {
 	t         *rtype
-	argSize   uintptr // size of arguments
-	retOffset uintptr // offset of return values.
-	stack     *bitVector
 	framePool *sync.Pool
+	abi       abiDesc
 }
 
 var layoutCache sync.Map // map[layoutKey]layoutType
 
 // funcLayout computes a struct type representing the layout of the
-// function arguments and return values for the function type t.
+// stack-assigned function arguments and return values for the function
+// type t.
 // If rcvr != nil, rcvr specifies the type of the receiver.
 // The returned type exists only for GC, so we only fill out GC relevant info.
 // Currently, that's just size and the GC program. We also fill in
 // the name for possible debugging use.
-func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
+func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abi abiDesc) {
 	if t.Kind() != Func {
 		panic("reflect: funcLayout of non-func type " + t.String())
 	}
@@ -3008,46 +3007,24 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset
 	k := layoutKey{t, rcvr}
 	if lti, ok := layoutCache.Load(k); ok {
 		lt := lti.(layoutType)
-		return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
+		return lt.t, lt.framePool, lt.abi
 	}
 
-	// compute gc program & stack bitmap for arguments
-	ptrmap := new(bitVector)
-	var offset uintptr
-	if rcvr != nil {
-		// Reflect uses the "interface" calling convention for
-		// methods, where receivers take one word of argument
-		// space no matter how big they actually are.
-		if ifaceIndir(rcvr) || rcvr.pointers() {
-			ptrmap.append(1)
-		} else {
-			ptrmap.append(0)
-		}
-		offset += ptrSize
-	}
-	for _, arg := range t.in() {
-		offset += -offset & uintptr(arg.align-1)
-		addTypeBits(ptrmap, offset, arg)
-		offset += arg.size
-	}
-	argSize = offset
-	offset += -offset & (ptrSize - 1)
-	retOffset = offset
-	for _, res := range t.out() {
-		offset += -offset & uintptr(res.align-1)
-		addTypeBits(ptrmap, offset, res)
-		offset += res.size
-	}
-	offset += -offset & (ptrSize - 1)
+	// Compute the ABI layout.
+	abi = newAbiDesc(t, rcvr)
 
 	// build dummy rtype holding gc program
 	x := &rtype{
 		align:   ptrSize,
-		size:    offset,
-		ptrdata: uintptr(ptrmap.n) * ptrSize,
+		// Don't add spill space here; it's only necessary in
+		// reflectcall's frame, not in the allocated frame.
+		// TODO(mknyszek): Remove this comment when register
+		// spill space in the frame is no longer required.
+		size:    align(abi.retOffset+abi.ret.stackBytes, ptrSize),
+		ptrdata: uintptr(abi.stackPtrs.n) * ptrSize,
 	}
-	if ptrmap.n > 0 {
-		x.gcdata = &ptrmap.data[0]
+	if abi.stackPtrs.n > 0 {
+		x.gcdata = &abi.stackPtrs.data[0]
 	}
 
 	var s string
@@ -3064,13 +3041,11 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset
 	}}
 	lti, _ := layoutCache.LoadOrStore(k, layoutType{
 		t:         x,
-		argSize:   argSize,
-		retOffset: retOffset,
-		stack:     ptrmap,
 		framePool: framePool,
+		abi:       abi,
 	})
 	lt := lti.(layoutType)
-	return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
+	return lt.t, lt.framePool, lt.abi
 }
 
 // ifaceIndir reports whether t is stored indirectly in an interface value.
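
The cache handling above is the usual sync.Map double-checked pattern;
a stripped-down sketch (hypothetical names, not reflect's code):

package main

import "sync"

var cache sync.Map // map[string]int

func expensive(k string) int { return len(k) } // stand-in for newAbiDesc etc.

func lookup(k string) int {
	if v, ok := cache.Load(k); ok {
		return v.(int) // fast path: already computed
	}
	// Slow path: compute, then LoadOrStore so that concurrent callers
	// all share whichever value won the race.
	v, _ := cache.LoadOrStore(k, expensive(k))
	return v.(int)
}

func main() { println(lookup("func(int) string")) }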

src/reflect/value.go

@@ -5,6 +5,7 @@
 package reflect
 
 import (
+	"internal/abi"
 	"internal/unsafeheader"
 	"math"
 	"runtime"
@@ -352,6 +353,8 @@ func (v Value) CallSlice(in []Value) []Value {
 
 var callGC bool // for testing; see TestCallMethodJump
 
+const debugReflectCall = false
+
 func (v Value) call(op string, in []Value) []Value {
 	// Get function pointer, type.
 	t := (*funcType)(unsafe.Pointer(v.typ))
@@ -430,50 +433,112 @@ func (v Value) call(op string, in []Value) []Value {
 	}
 	nout := t.NumOut()
 
-	// Compute frame type.
-	frametype, _, retOffset, _, framePool := funcLayout(t, rcvrtype)
+	// Register argument space.
+	var regArgs abi.RegArgs
 
-	// Allocate a chunk of memory for frame.
-	var args unsafe.Pointer
-	if nout == 0 {
-		args = framePool.Get().(unsafe.Pointer)
-	} else {
-		// Can't use pool if the function has return values.
-		// We will leak pointer to args in ret, so its lifetime is not scoped.
-		args = unsafe_New(frametype)
+	// Compute frame type.
+	frametype, framePool, abi := funcLayout(t, rcvrtype)
+
+	// Allocate a chunk of memory for frame if needed.
+	var stackArgs unsafe.Pointer
+	if frametype.size != 0 {
+		if nout == 0 {
+			stackArgs = framePool.Get().(unsafe.Pointer)
+		} else {
+			// Can't use pool if the function has return values.
+			// We will leak pointer to args in ret, so its lifetime is not scoped.
+			stackArgs = unsafe_New(frametype)
+		}
+	}
+	frameSize := frametype.size
+
+	if debugReflectCall {
+		println("reflect.call", t.String())
+		abi.dump()
 	}
-	off := uintptr(0)
 
 	// Copy inputs into args.
+
+	// Handle receiver.
+	inStart := 0
 	if rcvrtype != nil {
-		storeRcvr(rcvr, args)
-		off = ptrSize
+		// Guaranteed to only be one word in size,
+		// so it will only take up exactly 1 abiStep (either
+		// in a register or on the stack).
+		switch st := abi.call.steps[0]; st.kind {
+		case abiStepStack:
+			storeRcvr(rcvr, stackArgs)
+		case abiStepIntReg, abiStepPointer:
+			// Even pointers can go into the uintptr slot because
+			// they'll be kept alive by the Values referenced by
+			// this frame. Reflection forces these to be heap-allocated,
+			// so we don't need to worry about stack copying.
+			storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ints[st.ireg]))
+		case abiStepFloatReg:
+			storeRcvr(rcvr, unsafe.Pointer(&regArgs.Floats[st.freg]))
+		default:
+			panic("unknown ABI parameter kind")
+		}
+		inStart = 1
 	}
+
+	// Handle arguments.
 	for i, v := range in {
 		v.mustBeExported()
 		targ := t.In(i).(*rtype)
-		a := uintptr(targ.align)
-		off = (off + a - 1) &^ (a - 1)
-		n := targ.size
-		if n == 0 {
-			// Not safe to compute args+off pointing at 0 bytes,
-			// because that might point beyond the end of the frame,
-			// but we still need to call assignTo to check assignability.
-			v.assignTo("reflect.Value.Call", targ, nil)
-			continue
+		// TODO(mknyszek): Figure out if it's possible to get some
+		// scratch space for this assignment check. Previously, it
+		// was possible to use space in the argument frame.
+		v = v.assignTo("reflect.Value.Call", targ, nil)
+	stepsLoop:
+		for _, st := range abi.call.stepsForValue(i + inStart) {
+			switch st.kind {
+			case abiStepStack:
+				// Copy values to the "stack."
+				addr := add(stackArgs, st.stkOff, "precomputed stack arg offset")
+				if v.flag&flagIndir != 0 {
+					typedmemmove(targ, addr, v.ptr)
+				} else {
+					*(*unsafe.Pointer)(addr) = v.ptr
+				}
+				// There's only one step for a stack-allocated value.
+				break stepsLoop
+			case abiStepIntReg, abiStepPointer:
+				// Copy values to "integer registers."
+				if v.flag&flagIndir != 0 {
+					offset := add(v.ptr, st.offset, "precomputed value offset")
+					memmove(unsafe.Pointer(&regArgs.Ints[st.ireg]), offset, st.size)
+				} else {
+					if st.kind == abiStepPointer {
+						// Duplicate this pointer in the pointer area of the
+						// register space. Otherwise, there's the potential for
+						// this to be the last reference to v.ptr.
+						regArgs.Ptrs[st.ireg] = v.ptr
+					}
+					regArgs.Ints[st.ireg] = uintptr(v.ptr)
+				}
+			case abiStepFloatReg:
+				// Copy values to "float registers."
+				if v.flag&flagIndir == 0 {
+					panic("attempted to copy pointer to FP register")
+				}
+				offset := add(v.ptr, st.offset, "precomputed value offset")
+				memmove(unsafe.Pointer(&regArgs.Floats[st.freg]), offset, st.size)
+			default:
+				panic("unknown ABI part kind")
+			}
 		}
-		addr := add(args, off, "n > 0")
-		v = v.assignTo("reflect.Value.Call", targ, addr)
-		if v.flag&flagIndir != 0 {
-			typedmemmove(targ, addr, v.ptr)
-		} else {
-			*(*unsafe.Pointer)(addr) = v.ptr
-		}
-		off += n
 	}
 
+	// TODO(mknyszek): Remove this when we no longer have
+	// caller reserved spill space.
+	frameSize = align(frameSize, ptrSize)
+	frameSize += abi.spill
+
+	// Mark pointers in registers for the return path.
+	regArgs.ReturnIsPtr = abi.outRegPtrs
+
 	// Call.
-	call(frametype, fn, args, uint32(frametype.size), uint32(retOffset))
+	call(frametype, fn, stackArgs, uint32(frametype.size), uint32(abi.retOffset), uint32(frameSize), &regArgs)
 
 	// For testing; see TestCallMethodJump.
 	if callGC {
@@ -482,34 +547,82 @@ func (v Value) call(op string, in []Value) []Value {
 
 	var ret []Value
 	if nout == 0 {
-		typedmemclr(frametype, args)
-		framePool.Put(args)
+		if stackArgs != nil {
+			typedmemclr(frametype, stackArgs)
+			framePool.Put(stackArgs)
+		}
 	} else {
-		// Zero the now unused input area of args,
-		// because the Values returned by this function contain pointers to the args object,
-		// and will thus keep the args object alive indefinitely.
-		typedmemclrpartial(frametype, args, 0, retOffset)
+		if stackArgs != nil {
+			// Zero the now unused input area of args,
+			// because the Values returned by this function contain pointers to the args object,
+			// and will thus keep the args object alive indefinitely.
+			typedmemclrpartial(frametype, stackArgs, 0, abi.retOffset)
+		}
 
 		// Wrap Values around return values in args.
 		ret = make([]Value, nout)
-		off = retOffset
 		for i := 0; i < nout; i++ {
 			tv := t.Out(i)
-			a := uintptr(tv.Align())
-			off = (off + a - 1) &^ (a - 1)
-			if tv.Size() != 0 {
+			if tv.Size() == 0 {
+				// For zero-sized return value, args+off may point to the next object.
+				// In this case, return the zero value instead.
+				ret[i] = Zero(tv)
+				continue
+			}
+			steps := abi.ret.stepsForValue(i)
+			if st := steps[0]; st.kind == abiStepStack {
+				// This value is on the stack. If part of a value is stack
+				// allocated, the entire value is according to the ABI. So
+				// just make an indirection into the allocated frame.
 				fl := flagIndir | flag(tv.Kind())
-				ret[i] = Value{tv.common(), add(args, off, "tv.Size() != 0"), fl}
+				ret[i] = Value{tv.common(), add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
 				// Note: this does introduce false sharing between results -
 				// if any result is live, they are all live.
 				// (And the space for the args is live as well, but as we've
 				// cleared that space it isn't as big a deal.)
-			} else {
-				// For zero-sized return value, args+off may point to the next object.
-				// In this case, return the zero value instead.
-				ret[i] = Zero(tv)
+				continue
 			}
-			off += tv.Size()
+
+			// Handle pointers passed in registers.
+			if !ifaceIndir(tv.common()) {
+				// Pointer-valued data gets put directly
+				// into v.ptr.
+				if steps[0].kind != abiStepPointer {
+					print("kind=", steps[0].kind, ", type=", tv.String(), "\n")
+					panic("mismatch between ABI description and types")
+				}
+				ret[i] = Value{tv.common(), regArgs.Ptrs[steps[0].ireg], flag(t.Kind())}
+				continue
+			}
+
+			// All that's left is values passed in registers that we need to
+			// create space for and copy values back into.
+			//
+			// TODO(mknyszek): We make a new allocation for each register-allocated
+			// value, but previously we could always point into the heap-allocated
+			// stack frame. This is a regression that could be fixed by adding
+			// additional space to the allocated stack frame and storing the
+			// register-allocated return values into the allocated stack frame and
+			// referring there in the resulting Value.
+			s := unsafe_New(tv.common())
+			for _, st := range steps {
+				switch st.kind {
+				case abiStepIntReg:
+					offset := add(s, st.offset, "precomputed value offset")
+					memmove(offset, unsafe.Pointer(&regArgs.Ints[st.ireg]), st.size)
+				case abiStepPointer:
+					s := add(s, st.offset, "precomputed value offset")
+					*((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
+				case abiStepFloatReg:
+					offset := add(s, st.offset, "precomputed value offset")
+					memmove(offset, unsafe.Pointer(&regArgs.Floats[st.freg]), st.size)
+				case abiStepStack:
+					panic("register-based return value has stack component")
+				default:
+					panic("unknown ABI part kind")
+				}
+			}
+			ret[i] = Value{tv.common(), s, flagIndir | flag(tv.Kind())}
 		}
 	}
@@ -709,7 +822,8 @@ func align(x, n uintptr) uintptr {
 func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) {
 	rcvr := ctxt.rcvr
 	rcvrtype, t, fn := methodReceiver("call", rcvr, ctxt.method)
-	frametype, argSize, retOffset, _, framePool := funcLayout(t, rcvrtype)
+	frametype, framePool, abid := funcLayout(t, rcvrtype)
+	argSize, retOffset := abid.stackCallArgsSize, abid.retOffset
 
 	// Make a new frame that is one word bigger so we can store the receiver.
 	// This space is used for both arguments and return values.
@@ -727,10 +841,19 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) {
 		typedmemmovepartial(frametype, add(scratch, argOffset, "argSize > argOffset"), frame, argOffset, argSize-argOffset)
 	}
 
+	frameSize := frametype.size
+	// TODO(mknyszek): Remove this when we no longer have
+	// caller reserved spill space.
+	frameSize = align(frameSize, ptrSize)
+	frameSize += abid.spill
+
 	// Call.
 	// Call copies the arguments from scratch to the stack, calls fn,
 	// and then copies the results back into scratch.
-	call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset))
+	//
+	// TODO(mknyszek): Have this actually support the register-based ABI.
+	var regs abi.RegArgs
+	call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset), uint32(frameSize), &regs)
 
 	// Copy return values.
 	// Ignore any changes to args and just copy return values.
@@ -2802,14 +2925,32 @@ func mapiternext(it unsafe.Pointer)
 //go:noescape
 func maplen(m unsafe.Pointer) int
 
-// call calls fn with a copy of the n argument bytes pointed at by arg.
-// After fn returns, reflectcall copies n-retoffset result bytes
-// back into arg+retoffset before returning. If copying result bytes back,
-// the caller must pass the argument frame type as argtype, so that
-// call can execute appropriate write barriers during the copy.
+// call calls fn with "stackArgsSize" bytes of stack arguments laid out
+// at stackArgs and register arguments laid out in regArgs. frameSize is
+// the total amount of stack space that will be reserved by call, so this
+// should include enough space to spill register arguments to the stack in
+// case of preemption.
 //
+// After fn returns, call copies stackArgsSize-stackRetOffset result bytes
+// back into stackArgs+stackRetOffset before returning, for any return
+// values passed on the stack. Register-based return values will be found
+// in the same regArgs structure.
+//
+// regArgs must also be prepared with an appropriate ReturnIsPtr bitmap
+// indicating which registers will contain pointer-valued return values. The
+// purpose of this bitmap is to keep pointers visible to the GC between
+// returning from reflectcall and actually using them.
+//
+// If copying result bytes back from the stack, the caller must pass the
+// argument frame type as stackArgsType, so that call can execute appropriate
+// write barriers during the copy.
+//
+// Arguments passed through to call do not escape. The type is used only in a
+// very limited callee of call, the stackArgs are copied, and regArgs is only
+// used in the call frame.
+//
 //go:noescape
 //go:linkname call runtime.reflectcall
-func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
+func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
 
 func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
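
For context, the declaration above uses the pull-style //go:linkname
pattern: the function has no Go body, and the linker binds it to the
named runtime symbol. A minimal sketch of the same pattern against the
well-known runtime.nanotime symbol (an assumption of this example, not
something this change adds):

package fastclock

import _ "unsafe" // required for //go:linkname

//go:linkname nanotime runtime.nanotime
func nanotime() int64

// Since returns elapsed nanoseconds on the monotonic clock.
func Since(start int64) int64 { return nanotime() - start }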

src/runtime/asm_386.s

@@ -458,7 +458,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
 	JMP	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -470,8 +470,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
 	JMP	AX
 // Note: can't just "JMP NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT, $0-20
-	MOVL	argsize+12(FP), CX
+TEXT ·reflectcall(SB), NOSPLIT, $0-28
+	MOVL	frameSize+20(FP), CX
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -503,11 +503,11 @@ TEXT ·reflectcall(SB), NOSPLIT, $0-20
 	JMP	AX
 
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-28; \
 	NO_LOCAL_POINTERS; \
 	/* copy arguments to stack */ \
-	MOVL	argptr+8(FP), SI; \
-	MOVL	argsize+12(FP), CX; \
+	MOVL	stackArgs+8(FP), SI; \
+	MOVL	stackArgsSize+12(FP), CX; \
 	MOVL	SP, DI; \
 	REP;MOVSB; \
 	/* call function */ \
@@ -516,10 +516,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
 	PCDATA	$PCDATA_StackMapIndex, $0; \
 	CALL	AX; \
 	/* copy return values back */ \
-	MOVL	argtype+0(FP), DX; \
-	MOVL	argptr+8(FP), DI; \
-	MOVL	argsize+12(FP), CX; \
-	MOVL	retoffset+16(FP), BX; \
+	MOVL	stackArgsType+0(FP), DX; \
+	MOVL	stackArgs+8(FP), DI; \
+	MOVL	stackArgsSize+12(FP), CX; \
+	MOVL	stackRetOffset+16(FP), BX; \
 	MOVL	SP, SI; \
 	ADDL	BX, DI; \
 	ADDL	BX, SI; \
@@ -531,11 +531,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
 	MOVL	DX, 0(SP)
 	MOVL	DI, 4(SP)
 	MOVL	SI, 8(SP)
 	MOVL	CX, 12(SP)
+	MOVL	$0, 16(SP)
 	CALL	runtime·reflectcallmove(SB)
 	RET
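
The DISPATCH chains above implement a simple size-class jump: frameSize
is compared against a ladder of fixed-size callNN frames and control
transfers to the smallest one that fits. In Go terms (a sketch, not the
runtime's code):

package main

import "fmt"

// The runtime's ladder continues in powers of two up to 1<<30.
var classes = []uint32{16, 32, 64, 128, 256, 512, 1024}

func dispatch(frameSize uint32) uint32 {
	for _, c := range classes {
		if frameSize <= c {
			return c // JMP runtime·callNN
		}
	}
	panic("call frame too large") // runtime·badreflectcall
}

func main() {
	fmt.Println(dispatch(40))  // 64
	fmt.Println(dispatch(128)) // 128
}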

src/runtime/asm_amd64.s

@@ -445,8 +445,74 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
 	MOVL	$0, DX
 	JMP	runtime·morestack(SB)
 
+#ifdef GOEXPERIMENT_REGABI
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12.
+TEXT spillArgs<>(SB),NOSPLIT,$0-0
+	MOVQ AX, 0(R12)
+	MOVQ BX, 8(R12)
+	MOVQ CX, 16(R12)
+	MOVQ DI, 24(R12)
+	MOVQ SI, 32(R12)
+	MOVQ R8, 40(R12)
+	MOVQ R9, 48(R12)
+	MOVQ R10, 56(R12)
+	MOVQ R11, 64(R12)
+	MOVQ X0, 72(R12)
+	MOVQ X1, 80(R12)
+	MOVQ X2, 88(R12)
+	MOVQ X3, 96(R12)
+	MOVQ X4, 104(R12)
+	MOVQ X5, 112(R12)
+	MOVQ X6, 120(R12)
+	MOVQ X7, 128(R12)
+	MOVQ X8, 136(R12)
+	MOVQ X9, 144(R12)
+	MOVQ X10, 152(R12)
+	MOVQ X11, 160(R12)
+	MOVQ X12, 168(R12)
+	MOVQ X13, 176(R12)
+	MOVQ X14, 184(R12)
+	RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12.
+TEXT unspillArgs<>(SB),NOSPLIT,$0-0
+	MOVQ 0(R12), AX
+	MOVQ 8(R12), BX
+	MOVQ 16(R12), CX
+	MOVQ 24(R12), DI
+	MOVQ 32(R12), SI
+	MOVQ 40(R12), R8
+	MOVQ 48(R12), R9
+	MOVQ 56(R12), R10
+	MOVQ 64(R12), R11
+	MOVQ 72(R12), X0
+	MOVQ 80(R12), X1
+	MOVQ 88(R12), X2
+	MOVQ 96(R12), X3
+	MOVQ 104(R12), X4
+	MOVQ 112(R12), X5
+	MOVQ 120(R12), X6
+	MOVQ 128(R12), X7
+	MOVQ 136(R12), X8
+	MOVQ 144(R12), X9
+	MOVQ 152(R12), X10
+	MOVQ 160(R12), X11
+	MOVQ 168(R12), X12
+	MOVQ 176(R12), X13
+	MOVQ 184(R12), X14
+	RET
+#else
+// spillArgs stores return values from registers to a pointer in R12.
+TEXT spillArgs<>(SB),NOSPLIT,$0-0
+	RET
+
+// unspillArgs loads args into registers from a pointer in R12.
+TEXT unspillArgs<>(SB),NOSPLIT,$0-0
+	RET
+#endif
+
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -458,8 +524,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
 	JMP	AX
 // Note: can't just "JMP NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall<ABIInternal>(SB), NOSPLIT, $0-32
-	MOVLQZX argsize+24(FP), CX
+TEXT ·reflectcall<ABIInternal>(SB), NOSPLIT, $0-48
+	MOVLQZX frameSize+32(FP), CX
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -491,23 +557,28 @@ TEXT ·reflectcall<ABIInternal>(SB), NOSPLIT, $0-32
 	JMP	AX
 
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
 	NO_LOCAL_POINTERS; \
 	/* copy arguments to stack */ \
-	MOVQ	argptr+16(FP), SI; \
-	MOVLQZX argsize+24(FP), CX; \
+	MOVQ	stackArgs+16(FP), SI; \
+	MOVLQZX stackArgsSize+24(FP), CX; \
 	MOVQ	SP, DI; \
 	REP;MOVSB; \
+	/* set up argument registers */ \
+	MOVQ	regArgs+40(FP), R12; \
+	CALL	unspillArgs<>(SB); \
 	/* call function */ \
 	MOVQ	f+8(FP), DX; \
 	PCDATA	$PCDATA_StackMapIndex, $0; \
-	MOVQ	(DX), AX; \
-	CALL	AX; \
-	/* copy return values back */ \
-	MOVQ	argtype+0(FP), DX; \
-	MOVQ	argptr+16(FP), DI; \
-	MOVLQZX	argsize+24(FP), CX; \
-	MOVLQZX	retoffset+28(FP), BX; \
+	MOVQ	(DX), R12; \
+	CALL	R12; \
+	/* copy register return values back */ \
+	MOVQ	regArgs+40(FP), R12; \
+	CALL	spillArgs<>(SB); \
+	MOVLQZX stackArgsSize+24(FP), CX; \
+	MOVLQZX stackRetOffset+28(FP), BX; \
+	MOVQ	stackArgs+16(FP), DI; \
+	MOVQ	stackArgsType+0(FP), DX; \
 	MOVQ	SP, SI; \
 	ADDQ	BX, DI; \
 	ADDQ	BX, SI; \
@@ -519,12 +590,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	NO_LOCAL_POINTERS
 	MOVQ	DX, 0(SP)
 	MOVQ	DI, 8(SP)
 	MOVQ	SI, 16(SP)
 	MOVQ	CX, 24(SP)
+	MOVQ	R12, 32(SP)
 	CALL	runtime·reflectcallmove(SB)
 	RET
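
The hard-coded offsets in spillArgs/unspillArgs follow directly from the
RegArgs layout, assuming amd64's 9 integer and 15 floating-point argument
registers. A sketch that checks the arithmetic (only the two fields the
assembly is allowed to know about are modeled):

package main

import (
	"fmt"
	"unsafe"
)

const (
	intArgRegs   = 9  // AX, BX, CX, DI, SI, R8, R9, R10, R11
	floatArgRegs = 15 // X0 through X14
)

type regArgs struct {
	Ints   [intArgRegs]uintptr
	Floats [floatArgRegs]uint64
}

func main() {
	var r regArgs
	fmt.Println(unsafe.Offsetof(r.Ints))   // 0: AX spills to 0(R12)
	fmt.Println(unsafe.Offsetof(r.Floats)) // 72: X0 spills to 72(R12)
	// X14, the last float register, lands at 72 + 14*8 = 184(R12).
	fmt.Println(unsafe.Offsetof(r.Floats) + 14*unsafe.Sizeof(r.Floats[0]))
}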

src/runtime/asm_arm.s

@@ -404,7 +404,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 	B	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -415,8 +415,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 	MOVW	$NAME(SB), R1; \
 	B	(R1)
 
-TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
-	MOVW	argsize+12(FP), R0
+TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28
+	MOVW	frameSize+20(FP), R0
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -448,11 +448,11 @@ TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
 	B	(R1)
 
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-28; \
 	NO_LOCAL_POINTERS; \
 	/* copy arguments to stack */ \
-	MOVW	argptr+8(FP), R0; \
-	MOVW	argsize+12(FP), R2; \
+	MOVW	stackArgs+8(FP), R0; \
+	MOVW	stackArgsSize+12(FP), R2; \
 	ADD	$4, R13, R1; \
 	CMP	$0, R2; \
 	B.EQ	5(PC); \
@@ -466,10 +466,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
 	PCDATA	$PCDATA_StackMapIndex, $0; \
 	BL	(R0); \
 	/* copy return values back */ \
-	MOVW	argtype+0(FP), R4; \
-	MOVW	argptr+8(FP), R0; \
-	MOVW	argsize+12(FP), R2; \
-	MOVW	retoffset+16(FP), R3; \
+	MOVW	stackArgsType+0(FP), R4; \
+	MOVW	stackArgs+8(FP), R0; \
+	MOVW	stackArgsSize+12(FP), R2; \
+	MOVW	stackArgsRetOffset+16(FP), R3; \
 	ADD	$4, R13, R1; \
 	ADD	R3, R1; \
 	ADD	R3, R0; \
@@ -481,11 +481,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
 	MOVW	R4, 4(R13)
 	MOVW	R0, 8(R13)
 	MOVW	R1, 12(R13)
 	MOVW	R2, 16(R13)
+	MOVW	$0, R7
+	MOVW	R7, 20(R13)
 	BL	runtime·reflectcallmove(SB)
 	RET

src/runtime/asm_arm64.s

@@ -312,7 +312,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 	B	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -325,8 +325,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 	B	(R27)
 // Note: can't just "B NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
-	MOVWU	argsize+24(FP), R16
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+	MOVWU	frameSize+32(FP), R16
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -358,11 +358,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
 	B	(R0)
 
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
 	NO_LOCAL_POINTERS; \
 	/* copy arguments to stack */ \
-	MOVD	arg+16(FP), R3; \
-	MOVWU	argsize+24(FP), R4; \
+	MOVD	stackArgs+16(FP), R3; \
+	MOVWU	stackArgsSize+24(FP), R4; \
 	ADD	$8, RSP, R5; \
 	BIC	$0xf, R4, R6; \
 	CBZ	R6, 6(PC); \
@@ -388,10 +388,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
 	PCDATA	$PCDATA_StackMapIndex, $0; \
 	BL	(R0); \
 	/* copy return values back */ \
-	MOVD	argtype+0(FP), R7; \
-	MOVD	arg+16(FP), R3; \
-	MOVWU	n+24(FP), R4; \
-	MOVWU	retoffset+28(FP), R6; \
+	MOVD	stackArgsType+0(FP), R7; \
+	MOVD	stackArgs+16(FP), R3; \
+	MOVWU	stackArgsSize+24(FP), R4; \
+	MOVWU	stackRetOffset+28(FP), R6; \
 	ADD	$8, RSP, R5; \
 	ADD	R6, R5; \
 	ADD	R6, R3; \
@@ -403,11 +403,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $40-0
+TEXT callRet<>(SB), NOSPLIT, $48-0
 	MOVD	R7, 8(RSP)
 	MOVD	R3, 16(RSP)
 	MOVD	R5, 24(RSP)
 	MOVD	R4, 32(RSP)
+	MOVD	$0, 40(RSP)
 	BL	runtime·reflectcallmove(SB)
 	RET

src/runtime/asm_mips64x.s

@@ -264,7 +264,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 	JMP	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -277,8 +277,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 	JMP	(R4)
 // Note: can't just "BR NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
-	MOVWU	argsize+24(FP), R1
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+	MOVWU	frameSize+32(FP), R1
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -310,11 +310,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
 	JMP	(R4)
 
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
 	NO_LOCAL_POINTERS; \
 	/* copy arguments to stack */ \
-	MOVV	arg+16(FP), R1; \
-	MOVWU	argsize+24(FP), R2; \
+	MOVV	stackArgs+16(FP), R1; \
+	MOVWU	stackArgsSize+24(FP), R2; \
 	MOVV	R29, R3; \
 	ADDV	$8, R3; \
 	ADDV	R3, R2; \
@@ -330,10 +330,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
 	PCDATA	$PCDATA_StackMapIndex, $0; \
 	JAL	(R4); \
 	/* copy return values back */ \
-	MOVV	argtype+0(FP), R5; \
-	MOVV	arg+16(FP), R1; \
-	MOVWU	n+24(FP), R2; \
-	MOVWU	retoffset+28(FP), R4; \
+	MOVV	stackArgsType+0(FP), R5; \
+	MOVV	stackArgs+16(FP), R1; \
+	MOVWU	stackArgsSize+24(FP), R2; \
+	MOVWU	stackRetOffset+28(FP), R4; \
 	ADDV	$8, R29, R3; \
 	ADDV	R4, R3; \
 	ADDV	R4, R1; \
@@ -345,11 +345,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	MOVV	R5, 8(R29)
 	MOVV	R1, 16(R29)
 	MOVV	R3, 24(R29)
 	MOVV	R2, 32(R29)
+	MOVV	$0, 40(R29)
 	JAL	runtime·reflectcallmove(SB)
 	RET

src/runtime/asm_mipsx.s

@@ -265,7 +265,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
 	JMP	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
@@ -276,8 +276,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
 	MOVW	$NAME(SB), R4; \
 	JMP	(R4)
 
-TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
-	MOVW	argsize+12(FP), R1
+TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28
+	MOVW	frameSize+20(FP), R1
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
@@ -310,11 +310,11 @@ TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
 	JMP	(R4)
 
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \
+TEXT NAME(SB),WRAPPER,$MAXSIZE-28; \
 	NO_LOCAL_POINTERS; \
 	/* copy arguments to stack */ \
-	MOVW	arg+8(FP), R1; \
-	MOVW	argsize+12(FP), R2; \
+	MOVW	stackArgs+8(FP), R1; \
+	MOVW	stackArgsSize+12(FP), R2; \
 	MOVW	R29, R3; \
 	ADDU	$4, R3; \
 	ADDU	R3, R2; \
@@ -330,10 +330,10 @@ TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \
 	PCDATA	$PCDATA_StackMapIndex, $0; \
 	JAL	(R4); \
 	/* copy return values back */ \
-	MOVW	argtype+0(FP), R5; \
-	MOVW	arg+8(FP), R1; \
-	MOVW	n+12(FP), R2; \
-	MOVW	retoffset+16(FP), R4; \
+	MOVW	stackArgsType+0(FP), R5; \
+	MOVW	stackArgs+8(FP), R1; \
+	MOVW	stackArgsSize+12(FP), R2; \
+	MOVW	stackRetOffset+16(FP), R4; \
 	ADDU	$4, R29, R3; \
 	ADDU	R4, R3; \
 	ADDU	R4, R1; \
@@ -345,11 +345,12 @@ TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
 	MOVW	R5, 4(R29)
 	MOVW	R1, 8(R29)
 	MOVW	R3, 12(R29)
 	MOVW	R2, 16(R29)
+	MOVW	$0, 20(R29)
 	JAL	runtime·reflectcallmove(SB)
 	RET

src/runtime/asm_ppc64x.s

@@ -339,7 +339,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 	BR	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -353,8 +353,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 	BR	(CTR)
 // Note: can't just "BR NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
-	MOVWZ	argsize+24(FP), R3
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+	MOVWZ	frameSize+32(FP), R3
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -387,11 +387,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
 	BR	(CTR)
 
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
 	NO_LOCAL_POINTERS; \
 	/* copy arguments to stack */ \
-	MOVD	arg+16(FP), R3; \
-	MOVWZ	argsize+24(FP), R4; \
+	MOVD	stackArgs+16(FP), R3; \
+	MOVWZ	stackArgsSize+24(FP), R4; \
 	MOVD	R1, R5; \
 	CMP	R4, $8; \
 	BLT	tailsetup; \
@@ -439,10 +439,10 @@ callfn: \
 	MOVD	24(R1), R2; \
 #endif \
 	/* copy return values back */ \
-	MOVD	argtype+0(FP), R7; \
-	MOVD	arg+16(FP), R3; \
-	MOVWZ	n+24(FP), R4; \
-	MOVWZ	retoffset+28(FP), R6; \
+	MOVD	stackArgsType+0(FP), R7; \
+	MOVD	stackArgs+16(FP), R3; \
+	MOVWZ	stackArgsSize+24(FP), R4; \
+	MOVWZ	stackRetOffset+28(FP), R6; \
 	ADD	$FIXED_FRAME, R1, R5; \
 	ADD	R6, R5; \
 	ADD	R6, R3; \
@@ -454,11 +454,12 @@ callfn: \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	MOVD	R7, FIXED_FRAME+0(R1)
 	MOVD	R3, FIXED_FRAME+8(R1)
 	MOVD	R5, FIXED_FRAME+16(R1)
 	MOVD	R4, FIXED_FRAME+24(R1)
+	MOVD	$0, FIXED_FRAME+32(R1)
 	BL	runtime·reflectcallmove(SB)
 	RET
View File
@@ -359,7 +359,7 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
 RET
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -371,13 +371,13 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
 JALR ZERO, T2
 // Note: can't just "BR NAME(SB)" - bad inlining results.
-// func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
+// func call(stackArgsType *rtype, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 TEXT reflect·call(SB), NOSPLIT, $0-0
 JMP ·reflectcall(SB)
-// func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
-MOVWU argsize+24(FP), T0
+// func call(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+MOVWU frameSize+32(FP), T0
 DISPATCH(runtime·call16, 16)
 DISPATCH(runtime·call32, 32)
 DISPATCH(runtime·call64, 64)
@@ -409,11 +409,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
 JALR ZERO, T2
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
 NO_LOCAL_POINTERS; \
 /* copy arguments to stack */ \
-MOV arg+16(FP), A1; \
-MOVWU argsize+24(FP), A2; \
+MOV stackArgs+16(FP), A1; \
+MOVWU stackArgsSize+24(FP), A2; \
 MOV X2, A3; \
 ADD $8, A3; \
 ADD A3, A2; \
@@ -429,10 +429,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
 PCDATA $PCDATA_StackMapIndex, $0; \
 JALR RA, A4; \
 /* copy return values back */ \
-MOV argtype+0(FP), A5; \
-MOV arg+16(FP), A1; \
-MOVWU n+24(FP), A2; \
-MOVWU retoffset+28(FP), A4; \
+MOV stackArgsType+0(FP), A5; \
+MOV stackArgs+16(FP), A1; \
+MOVWU stackArgsSize+24(FP), A2; \
+MOVWU stackRetOffset+28(FP), A4; \
 ADD $8, X2, A3; \
 ADD A4, A3; \
 ADD A4, A1; \
@@ -444,11 +444,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 MOV A5, 8(X2)
 MOV A1, 16(X2)
 MOV A3, 24(X2)
 MOV A2, 32(X2)
+MOV $0, 40(X2)
 CALL runtime·reflectcallmove(SB)
 RET
View File
@@ -353,7 +353,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 BR runtime·morestack(SB)
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -366,8 +366,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
 BR (R5)
 // Note: can't just "BR NAME(SB)" - bad inlining results.
-TEXT ·reflectcall(SB), NOSPLIT, $-8-32
-MOVWZ argsize+24(FP), R3
+TEXT ·reflectcall(SB), NOSPLIT, $-8-48
+MOVWZ frameSize+32(FP), R3
 DISPATCH(runtime·call16, 16)
 DISPATCH(runtime·call32, 32)
 DISPATCH(runtime·call64, 64)
@@ -399,11 +399,11 @@ TEXT ·reflectcall(SB), NOSPLIT, $-8-32
 BR (R5)
 #define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
 NO_LOCAL_POINTERS; \
 /* copy arguments to stack */ \
-MOVD arg+16(FP), R4; \
-MOVWZ argsize+24(FP), R5; \
+MOVD stackArgs+16(FP), R4; \
+MOVWZ stackArgsSize+24(FP), R5; \
 MOVD $stack-MAXSIZE(SP), R6; \
loopArgs: /* copy 256 bytes at a time */ \
 CMP R5, $256; \
@@ -424,11 +424,11 @@ callFunction: \
 PCDATA $PCDATA_StackMapIndex, $0; \
 BL (R8); \
 /* copy return values back */ \
-MOVD argtype+0(FP), R7; \
-MOVD arg+16(FP), R6; \
-MOVWZ n+24(FP), R5; \
+MOVD stackArgsType+0(FP), R7; \
+MOVD stackArgs+16(FP), R6; \
+MOVWZ stackArgsSize+24(FP), R5; \
 MOVD $stack-MAXSIZE(SP), R4; \
-MOVWZ retoffset+28(FP), R1; \
+MOVWZ stackRetOffset+28(FP), R1; \
 ADD R1, R4; \
 ADD R1, R6; \
 SUB R1, R5; \
@@ -439,11 +439,12 @@ callFunction: \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 MOVD R7, 8(R15)
 MOVD R6, 16(R15)
 MOVD R4, 24(R15)
 MOVD R5, 32(R15)
+MOVD $0, 40(R15)
 BL runtime·reflectcallmove(SB)
 RET
View File
@@ -296,14 +296,14 @@ TEXT ·asmcgocall(SB), NOSPLIT, $0-0
 JMP NAME(SB); \
 End
-TEXT ·reflectcall(SB), NOSPLIT, $0-32
+TEXT ·reflectcall(SB), NOSPLIT, $0-48
 I64Load fn+8(FP)
 I64Eqz
 If
 CALLNORESUME runtime·sigpanic<ABIInternal>(SB)
 End
-MOVW argsize+24(FP), R0
+MOVW frameSize+32(FP), R0
 DISPATCH(runtime·call16, 16)
 DISPATCH(runtime·call32, 32)
@@ -335,18 +335,18 @@ TEXT ·reflectcall(SB), NOSPLIT, $0-32
 JMP runtime·badreflectcall(SB)
 #define CALLFN(NAME, MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
 NO_LOCAL_POINTERS; \
-MOVW argsize+24(FP), R0; \
+MOVW stackArgsSize+24(FP), R0; \
 \
 Get R0; \
 I64Eqz; \
 Not; \
 If; \
 Get SP; \
-I64Load argptr+16(FP); \
+I64Load stackArgs+16(FP); \
 I32WrapI64; \
-I64Load argsize+24(FP); \
+I64Load stackArgsSize+24(FP); \
 I64Const $3; \
 I64ShrU; \
 I32WrapI64; \
@@ -359,12 +359,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
 I64Load $0; \
 CALL; \
 \
-I64Load32U retoffset+28(FP); \
+I64Load32U stackRetOffset+28(FP); \
 Set R0; \
 \
-MOVD argtype+0(FP), RET0; \
+MOVD stackArgsType+0(FP), RET0; \
 \
-I64Load argptr+16(FP); \
+I64Load stackArgs+16(FP); \
 Get R0; \
 I64Add; \
 Set RET1; \
@@ -375,7 +375,7 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
 I64Add; \
 Set RET2; \
 \
-I64Load32U argsize+24(FP); \
+I64Load32U stackArgsSize+24(FP); \
 Get R0; \
 I64Sub; \
 Set RET3; \
@@ -387,12 +387,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 NO_LOCAL_POINTERS
 MOVD RET0, 0(SP)
 MOVD RET1, 8(SP)
 MOVD RET2, 16(SP)
 MOVD RET3, 24(SP)
+MOVD $0, 32(SP)
 CALL runtime·reflectcallmove(SB)
 RET
View File
@@ -14,6 +14,7 @@
 package runtime

 import (
+	"internal/abi"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -223,11 +224,18 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
 // stack map of reflectcall is wrong.
 //
 //go:nosplit
-func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr) {
+func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
 	if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
 		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
 	}
 	memmove(dst, src, size)
+
+	// Move pointers returned in registers to a place where the GC can see them.
+	for i := range regs.Ints {
+		if regs.ReturnIsPtr.Get(i) {
+			regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
+		}
+	}
 }

 //go:nosplit
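The new loop at the end of reflectcallmove exists because the collector scans unsafe.Pointer-typed words but ignores uintptr-typed ones: a pointer living only in Ints would be invisible to the GC. A self-contained sketch of the idea, using a hypothetical regArgs type standing in for abi.RegArgs (the register count and bitmap helper here are assumptions for illustration, not the real internal/abi definitions):

	package main

	import (
		"fmt"
		"unsafe"
	)

	const intArgRegs = 9 // assumed count; the real value lives in internal/abi

	// regArgs is a stand-in for abi.RegArgs.
	type regArgs struct {
		ints        [intArgRegs]uintptr        // untyped words: not scanned by the GC
		ptrs        [intArgRegs]unsafe.Pointer // typed slots: scanned by the GC
		returnIsPtr [(intArgRegs + 7) / 8]uint8
	}

	func (r *regArgs) isPtr(i int) bool { return r.returnIsPtr[i/8]&(1<<(i%8)) != 0 }

	// exposePtrs mirrors the new loop in reflectcallmove: any register
	// flagged as holding a pointer on return is duplicated into the
	// pointer-typed array so the GC can find (and keep alive) its target.
	func exposePtrs(r *regArgs) {
		for i := range r.ints {
			if r.isPtr(i) {
				r.ptrs[i] = unsafe.Pointer(r.ints[i])
			}
		}
	}

	func main() {
		x := new(int)
		r := &regArgs{}
		r.ints[0] = uintptr(unsafe.Pointer(x)) // pretend fn returned x in register 0
		r.returnIsPtr[0] |= 1                  // mark register 0 as a pointer
		exposePtrs(r)
		fmt.Println(r.ptrs[0] == unsafe.Pointer(x)) // true
	}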
View File
@@ -7,6 +7,7 @@
 package runtime

 import (
+	"internal/abi"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -219,7 +220,11 @@ func runfinq() {
 		throw("bad kind in runfinq")
 	}
 	fingRunning = true
-	reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
+	// Pass a dummy RegArgs for now.
+	//
+	// TODO(mknyszek): Pass arguments in registers.
+	var regs abi.RegArgs
+	reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), &regs)
 	fingRunning = false
 	// Drop finalizer queue heap references
View File
@@ -5,6 +5,7 @@
 package runtime

 import (
+	"internal/abi"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -874,7 +875,13 @@ func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
 		p.pc = getcallerpc()
 		p.sp = unsafe.Pointer(getcallersp())
 	}
-	reflectcall(nil, fn, arg, argsize, argsize)
+	// Pass a dummy RegArgs for now since no function actually implements
+	// the register-based ABI.
+	//
+	// TODO(mknyszek): Implement this properly, setting up arguments in
+	// registers as necessary in the caller.
+	var regs abi.RegArgs
+	reflectcall(nil, fn, arg, argsize, argsize, argsize, &regs)
 	if p != nil {
 		p.pc = 0
 		p.sp = unsafe.Pointer(nil)
@@ -968,7 +975,9 @@ func gopanic(e interface{}) {
 		}
 	} else {
 		p.argp = unsafe.Pointer(getargp(0))
-		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
+
+		var regs abi.RegArgs
+		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz), uint32(d.siz), &regs)
 	}
 	p.argp = nil
View File
@@ -4,7 +4,10 @@
 package runtime

-import "unsafe"
+import (
+	"internal/abi"
+	"unsafe"
+)

 // Should be a built-in for unsafe.Pointer?
 //go:nosplit
@@ -174,19 +177,50 @@
 func setg(gg *g)
 func breakpoint()

-// reflectcall calls fn with a copy of the n argument bytes pointed at by arg.
-// After fn returns, reflectcall copies n-retoffset result bytes
-// back into arg+retoffset before returning. If copying result bytes back,
-// the caller should pass the argument frame type as argtype, so that
-// call can execute appropriate write barriers during the copy.
+// reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
+// frameSize, and regArgs.
 //
-// Package reflect always passes a frame type. In package runtime,
-// Windows callbacks are the only use of this that copies results
-// back, and those cannot have pointers in their results, so runtime
-// passes nil for the frame type.
+// Arguments passed on the stack and space for return values passed on the stack
+// must be laid out at the space pointed to by stackArgs (with total length
+// stackArgsSize) according to the ABI.
+//
+// stackRetOffset must be some value <= stackArgsSize that indicates the
+// offset within stackArgs where the return value space begins.
+//
+// frameSize is the total size of the argument frame at stackArgs and must
+// therefore be >= stackArgsSize. It must include additional space for spilling
+// register arguments for stack growth and preemption.
+//
+// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
+// since frameSize will be redundant with stackArgsSize.
+//
+// Arguments passed in registers must be laid out in regArgs according to the ABI.
+// regArgs will hold any return values passed in registers after the call.
+//
+// reflectcall copies stack arguments from stackArgs to the goroutine stack, and
+// then copies back stackArgsSize-stackRetOffset bytes to the return space
+// in stackArgs once fn has completed. It also "unspills" argument registers from
+// regArgs before calling fn, and spills them back into regArgs immediately
+// following the call to fn. If there are results being returned on the stack,
+// the caller should pass the argument frame type as stackArgsType so that
+// reflectcall can execute appropriate write barriers during the copy.
+//
+// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
+// registers on the return path will contain Go pointers. It will then store
+// these pointers in regArgs.Ptrs such that they are visible to the GC.
+//
+// Package reflect passes a frame type. In package runtime, there is only
+// one call that copies results back, in callbackWrap in syscall_windows.go, and it
+// does NOT pass a frame type, meaning there are no write barriers invoked. See that
+// call site for justification.
 //
 // Package reflect accesses this symbol through a linkname.
-func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
+//
+// Arguments passed through to reflectcall do not escape. The type is used
+// only in a very limited callee of reflectcall, the stackArgs are copied, and
+// regArgs is only used in the reflectcall frame.
+//go:noescape
+func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)

 func procyield(cycles uint32)
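For the 64-bit ports above, this new signature is also why reflectcall's own argument frame grows from 32 to 48 bytes: three pointers (24) plus three uint32s (12) pad to 40, and the trailing *abi.RegArgs adds 8. To make the stackArgsSize/stackRetOffset/frameSize contract itself concrete, here is a toy layout computation (not runtime code) for a hypothetical func(a int32, b int64) int64 called entirely on the stack, the zero-register case in which no extra spill space is needed:

	package main

	import "fmt"

	func main() {
		// Stack layout for a hypothetical func(a int32, b int64) int64 under ABI0:
		//   a at +0 (4 bytes), padding to 8, b at +8 (8 bytes) -> arguments end at 16
		//   result int64 at +16                                -> frame ends at 24
		const stackRetOffset = 16 // where the return value space begins
		const stackArgsSize = 24  // arguments plus return space
		// With zero argument registers there is nothing to spill, so the
		// frame need not be any bigger than the stack arguments themselves.
		const frameSize = stackArgsSize

		// reflectcall copies stackArgsSize bytes in, calls fn, then copies
		// stackArgsSize-stackRetOffset bytes back out as results.
		fmt.Println(stackArgsSize-stackRetOffset, "result bytes copied back") // 8
	}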
View File
@@ -5,6 +5,7 @@
 package runtime

 import (
+	"internal/abi"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -242,7 +243,11 @@ func callbackWrap(a *callbackArgs) {
 	// Even though this is copying back results, we can pass a nil
 	// type because those results must not require write barriers.
-	reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.retOffset)+sys.PtrSize, uint32(c.retOffset))
+	//
+	// Pass a dummy RegArgs for now.
+	// TODO(mknyszek): Pass arguments in registers.
+	var regs abi.RegArgs
+	reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.retOffset)+sys.PtrSize, uint32(c.retOffset), uint32(c.retOffset)+sys.PtrSize, &regs)

 	// Extract the result.
 	a.result = *(*uintptr)(unsafe.Pointer(&frame[c.retOffset]))