// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
|
|
|
|
|
|
|
|
import "unsafe"
|
|
|
|
|
|
|
|
// Declarations for runtime services implemented in C or assembly.
// C implementations of these functions are in stubs.goc.
// Assembly implementations are in various files, see comments with
// each function.
|
|
|
const (
	// ptrSize is the size in bytes of a pointer on the target
	// architecture (4 on 32-bit, 8 on 64-bit systems).
	ptrSize = unsafe.Sizeof((*byte)(nil))
)
|
|
|
|
|
// gogetcallerpc returns the PC of the call site that invoked the
// calling function; p points at the calling function's first argument
// (NOTE(review): mirrors the C getcallerpc — confirm the exact
// contract in stubs.goc).
//go:noescape
func gogetcallerpc(p unsafe.Pointer) uintptr
// Race detector annotations, implemented elsewhere in the runtime.
// Each records an access to the memory at addr; callpc and pc
// identify the call site and the accessing instruction. The *rangepc
// variants cover len bytes starting at addr.

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racereadrangepc(addr unsafe.Pointer, len int, callpc, pc uintptr)

//go:noescape
func racewriterangepc(addr unsafe.Pointer, len int, callpc, pc uintptr)
|
|
|
// add returns the address x bytes past p.
// Should be a built-in for unsafe.Pointer?
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	// The uintptr arithmetic and the conversion back to
	// unsafe.Pointer are kept in a single expression so a pointer
	// value is never materialized as a bare uintptr variable.
	return unsafe.Pointer(uintptr(p) + x)
}
// roundup rounds p up to the next multiple of n.
// n must be a power of 2
func roundup(p unsafe.Pointer, n uintptr) unsafe.Pointer {
	// Adding n-1 and clearing the low bits with &^ (n-1) is the
	// power-of-two alignment trick; it is incorrect for other n.
	return unsafe.Pointer((uintptr(p) + n - 1) &^ (n - 1))
}
// in stubs.goc

// acquirem pins the calling goroutine to its current m and returns
// it; releasem undoes the pin (NOTE(review): semantics inferred from
// runtime convention — confirm in stubs.goc).
func acquirem() *m

func releasem(mp *m)

// gomcache returns the current m's mcache.
func gomcache() *mcache
// An mFunction represents a C function that runs on the M stack. It
// can be called from Go using mcall or onM. Through the magic of
// linking, an mFunction variable and the corresponding C code entry
// point live at the same address.
type mFunction byte
// in asm_*.s

// mcall switches to the M stack and calls fn; onM runs fn on the M
// stack and then resumes the caller (NOTE(review): see asm_*.s for
// the exact calling and return conventions of each).
func mcall(fn *mFunction)

func onM(fn *mFunction)
// C functions that run on the M stack. Call these like
// mcall(&mcacheRefill_m)
// Arguments should be passed in m->scalararg[x] and
// m->ptrarg[x]. Return values can be passed in those
// same slots.
//
// Each variable below is linked to the C entry point of the same
// name (see the mFunction doc comment); do not rename them.
var (
	mcacheRefill_m,
	largeAlloc_m,
	mprofMalloc_m,
	gc_m,
	setFinalizer_m,
	markallocated_m,
	unrollgcprog_m,
	unrollgcproginplace_m,
	gosched_m mFunction
)
// memclr clears n bytes starting at ptr.
// in memclr_*.s
//go:noescape
func memclr(ptr unsafe.Pointer, n uintptr)
// racemalloc informs the race detector of a new allocation at p;
// tracealloc logs an allocation when allocation tracing is enabled
// (NOTE(review): both implemented outside this file — confirm).
func racemalloc(p unsafe.Pointer, size uintptr)

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type)
// memmove copies n bytes from "from" to "to".
// The regions may overlap.
// in memmove_*.s
//go:noescape
func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
// in asm_*.s

// fastrand2 returns the next value from a fast pseudorandom number
// generator (not suitable for cryptographic use).
func fastrand2() uint32
const (
	// gcpercentUnknown is a sentinel meaning the GC percentage has
	// not yet been determined (NOTE(review): confirm against the GC
	// implementation in C).
	gcpercentUnknown = -2

	// concurrentSweep selects the concurrent sweep implementation.
	concurrentSweep = true
)
// Atomic operations to read/write a pointer.
// in stubs.goc
func goatomicloadp(p unsafe.Pointer) unsafe.Pointer // return *p

func goatomicstorep(p unsafe.Pointer, v unsafe.Pointer) // *p = v
// in stubs.goc

// if *p == x { *p = y; return true } else { return false }, atomically
//go:noescape
func gocas(p *uint32, x uint32, y uint32) bool

// gocasx is the pointer-sized counterpart of gocas.
//go:noescape
func gocasx(p *uintptr, x uintptr, y uintptr) bool
// goreadgogc returns the effective GOGC setting and gonanotime the
// runtime clock in nanoseconds (NOTE(review): both implemented in C —
// confirm exact semantics there).
func goreadgogc() int32

func gonanotime() int64

// gosched yields the processor, allowing other goroutines to run.
func gosched()

// starttheworld and stoptheworld resume and suspend execution of all
// goroutines, bracketing a stop-the-world phase.
func starttheworld()

func stoptheworld()

// clearpools clears runtime-managed object pool caches
// (NOTE(review): confirm which pools in the C implementation).
func clearpools()
// hashLoad mirrors the map loadFactor as an exported value so tests
// can read it.
var hashLoad = loadFactor
// in asm_*.s

// memeq reports whether the size bytes at a and at b are identical.
//go:noescape
func memeq(a, b unsafe.Pointer, size uintptr) bool
// Code pointers for the nohash/noequal algorithms. Used for producing better error messages.
var nohashcode uintptr
var noequalcode uintptr
// gothrow is the Go version of runtime.throw: it reports a fatal
// runtime error with message s and does not return.
// in panic.c
func gothrow(s string)
// golock and gounlock acquire and release a runtime lock.
func golock(x *lock)

func gounlock(x *lock)

// semacquire and semrelease operate on a runtime semaphore
// (NOTE(review): see the C sema implementation for the meaning of
// the bool flag — presumably profiling-related; confirm).
func semacquire(*uint32, bool)

func semrelease(*uint32)
|
|
|
// Return the Go equivalent of the C Alg structure.
|
|
|
|
// TODO: at some point Go will hold the truth for the layout
|
|
|
|
// of runtime structures and C will be derived from it (if
|
|
|
|
// needed at all). At that point this function can go away.
|
|
|
|
type goalgtype struct {
|
|
|
|
// function for hashing objects of this type
|
|
|
|
// (ptr to object, size, seed) -> hash
|
|
|
|
hash func(unsafe.Pointer, uintptr, uintptr) uintptr
|
2014-08-07 15:52:55 -06:00
|
|
|
// function for comparing objects of this type
|
|
|
|
// (ptr to object A, ptr to object B, size) -> ==?
|
|
|
|
equal func(unsafe.Pointer, unsafe.Pointer, uintptr) bool
|
2014-07-31 16:07:05 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
func goalg(a *alg) *goalgtype {
|
|
|
|
return (*goalgtype)(unsafe.Pointer(a))
|
|
|
|
}
|
|
|
|
// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to a single xor instruction.
// USE CAREFULLY!
func noescape(p unsafe.Pointer) unsafe.Pointer {
	// The round-trip through uintptr with x^0 is what breaks the
	// dependency in the eyes of escape analysis; do not "simplify".
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
// gopersistentalloc allocates a permanent (not garbage collected)
// memory region of size n. Use wisely!
func gopersistentalloc(n uintptr) unsafe.Pointer