// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"unsafe"
)

type mOS struct {
	initialized bool
	mutex       pthreadmutex
	cond        pthreadcond
	count       int
}

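// unimplemented reports that a required OS function is missing and then
// crashes deliberately: the write to the unmapped address 1231 faults,
// producing a stack trace that points back here.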
func unimplemented(name string) {
	println(name, "not implemented")
	*(*int)(unsafe.Pointer(uintptr(1231))) = 1231
}

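// semacreate lazily initializes the per-m semaphore state used by
// semasleep and semawakeup: a pthread mutex, a condition variable,
// and a wakeup count.
//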
//go:nosplit
func semacreate(mp *m) {
	if mp.initialized {
		return
	}
	mp.initialized = true
	if err := pthread_mutex_init(&mp.mutex, nil); err != 0 {
		throw("pthread_mutex_init")
	}
	if err := pthread_cond_init(&mp.cond, nil); err != 0 {
		throw("pthread_cond_init")
	}
}

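// semasleep waits for a wakeup from semawakeup. If ns >= 0 it waits at
// most ns nanoseconds, returning -1 on timeout; otherwise it waits
// indefinitely. It returns 0 once a wakeup has been consumed.
//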
//go:nosplit
func semasleep(ns int64) int32 {
	var start int64
	if ns >= 0 {
		start = nanotime()
	}
	mp := getg().m
	pthread_mutex_lock(&mp.mutex)
	for {
		if mp.count > 0 {
			mp.count--
			pthread_mutex_unlock(&mp.mutex)
			return 0
		}
		if ns >= 0 {
			spent := nanotime() - start
			if spent >= ns {
				pthread_mutex_unlock(&mp.mutex)
				return -1
			}
			var t timespec
			t.setNsec(ns - spent)
			err := pthread_cond_timedwait_relative_np(&mp.cond, &mp.mutex, &t)
			if err == _ETIMEDOUT {
				pthread_mutex_unlock(&mp.mutex)
				return -1
			}
		} else {
			pthread_cond_wait(&mp.cond, &mp.mutex)
		}
	}
}

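// semawakeup wakes up an m blocked in semasleep by incrementing the
// wakeup count and signaling the condition variable.
//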
//go:nosplit
func semawakeup(mp *m) {
	pthread_mutex_lock(&mp.mutex)
	mp.count++
	if mp.count > 0 {
		pthread_cond_signal(&mp.cond)
	}
	pthread_mutex_unlock(&mp.mutex)
}

// The read and write file descriptors used by the sigNote functions.
var sigNoteRead, sigNoteWrite int32

// sigNoteSetup initializes an async-signal-safe note.
//
// The current implementation of notes on Darwin is not async-signal-safe,
// because the functions pthread_mutex_lock, pthread_cond_signal, and
// pthread_mutex_unlock, called by semawakeup, are not async-signal-safe.
// There is only one case where we need to wake up a note from a signal
// handler: the sigsend function. The signal handler code does not require
// all the features of notes: it does not need to do a timed wait.
// This is a separate implementation of notes, based on a pipe, that does
// not support timed waits but is async-signal-safe.
func sigNoteSetup(*note) {
	if sigNoteRead != 0 || sigNoteWrite != 0 {
		throw("duplicate sigNoteSetup")
	}
	var errno int32
	sigNoteRead, sigNoteWrite, errno = pipe()
	if errno != 0 {
		throw("pipe failed")
	}
	closeonexec(sigNoteRead)
	closeonexec(sigNoteWrite)

	// Make the write end of the pipe non-blocking, so that if the pipe
	// buffer is somehow full we will not block in the signal handler.
	// Leave the read end of the pipe blocking so that we will block
	// in sigNoteSleep.
	setNonblock(sigNoteWrite)
}

// sigNoteWakeup wakes up a thread sleeping on a note created by sigNoteSetup.
func sigNoteWakeup(*note) {
	var b byte
	write(uintptr(sigNoteWrite), unsafe.Pointer(&b), 1)
}

// sigNoteSleep waits for a note created by sigNoteSetup to be woken.
func sigNoteSleep(*note) {
	for {
		var b byte
		entersyscallblock()
		n := read(sigNoteRead, unsafe.Pointer(&b), 1)
		exitsyscall()
		if n != -_EINTR {
			return
		}
	}
}

// BSD interface for threading.
func osinit() {
	// pthread_create delayed until end of goenvs so that we
	// can look at the environment first.

	ncpu = getncpu()
	physPageSize = getPageSize()
}

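// sysctlbynameInt32 reads the int32 sysctl value with the given
// NUL-terminated name. It returns the sysctlbyname return code and the
// value read. A hypothetical call, with the name chosen purely for
// illustration:
//
//	ret, v := sysctlbynameInt32([]byte("hw.optional.armv8_1_atomics\x00"))
//	ok := ret == 0 && v != 0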
func sysctlbynameInt32(name []byte) (int32, int32) {
	out := int32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	return ret, out
}

//go:linkname internal_cpu_getsysctlbyname internal/cpu.getsysctlbyname
func internal_cpu_getsysctlbyname(name []byte) (int32, int32) {
	return sysctlbynameInt32(name)
}

const (
	_CTL_HW      = 6
	_HW_NCPU     = 3
	_HW_PAGESIZE = 7
)

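// getncpu returns the number of CPUs reported by the hw.ncpu sysctl,
// falling back to 1 if the query fails.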
func getncpu() int32 {
	// Use sysctl to fetch hw.ncpu.
	mib := [2]uint32{_CTL_HW, _HW_NCPU}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 && int32(out) > 0 {
		return int32(out)
	}
	return 1
}

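// getPageSize returns the system page size reported by the hw.pagesize
// sysctl, or 0 if the query fails.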
func getPageSize() uintptr {
	// Use sysctl to fetch hw.pagesize.
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 && int32(out) > 0 {
		return uintptr(out)
	}
	return 0
}

var urandom_dev = []byte("/dev/urandom\x00")

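// getRandomData fills r with random bytes read from /dev/urandom,
// extending with extendRandom if the read comes up short.
//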
//go:nosplit
func getRandomData(r []byte) {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}

func goenvs() {
	goenvs_unix()
}

// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrierrec
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
	}

	// Initialize an attribute object.
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Find out OS stack size for our own stack guard.
	var stacksize uintptr
	if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
	mp.g0.stack.hi = stacksize // for mstart

	// Tell the pthread library we won't join with this thread.
	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	err = pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}

// glue code to call mstart from pthread_create.
func mstart_stub()

// newosproc0 is a version of newosproc that can be called before the runtime
// is initialized.
//
// This function is not safe to use after initialization as it does not pass an M as fnarg.
//
//go:nosplit
func newosproc0(stacksize uintptr, fn uintptr) {
	// Initialize an attribute object.
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// The caller passes in a suggested stack size,
	// from when we allocated the stack and thread ourselves,
	// without libpthread. Now that we're using libpthread,
	// we use the OS default stack size instead of the suggestion.
	// Find out that stack size for our own stack guard.
	if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
	g0.stack.hi = stacksize // for mstart
	memstats.stacks_sys.add(int64(stacksize))

	// Tell the pthread library we won't join with this thread.
	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	err = pthread_create(&attr, fn, nil)
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}

var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
var failthreadcreate = []byte("runtime: failed to create new OS thread\n")

// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
	initsig(true)
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024) // OS X wants >= 8K
	mp.gsignal.m = mp
	if GOOS == "darwin" && GOARCH == "arm64" {
		// mlock the signal stack to work around a kernel bug where it may
		// SIGILL when the signal stack is not faulted in while a signal
		// arrives. See issue 42774.
		mlock(unsafe.Pointer(mp.gsignal.stack.hi-physPageSize), physPageSize)
	}
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	// iOS does not support alternate signal stack.
	// The signal handler handles it directly.
	if !(GOOS == "ios" && GOARCH == "arm64") {
		minitSignalStack()
	}
	minitSignalMask()
	getg().m.procid = uint64(pthread_self())
}

// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
	// iOS does not support alternate signal stack.
	// See minit.
	if !(GOOS == "ios" && GOARCH == "arm64") {
		unminitSignals()
	}
}

// Called from exitm, but not from drop, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
func mdestroy(mp *m) {
}

//go:nosplit
func osyield_no_g() {
	usleep_no_g(1)
}

//go:nosplit
func osyield() {
	usleep(1)
}

const (
	_NSIG        = 32
	_SI_USER     = 0 /* empirically true, but not what headers say */
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3
	_SS_DISABLE  = 4
)

//extern SigTabTT runtime·sigtab[];

type sigset uint32

var sigset_all = ^sigset(0)

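// setsig installs fn as the handler for signal i, running on the
// alternate signal stack with all signals blocked during delivery.
// When fn is the Go handler, the raw entry point actually installed is
// sigtramp (or cgoSigtramp when cgo is in use) rather than sighandler.
//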
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa usigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = ^uint32(0)
	if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
		if iscgo {
			fn = abi.FuncPCABI0(cgoSigtramp)
		} else {
			fn = abi.FuncPCABI0(sigtramp)
		}
	}
	*(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = fn
	sigaction(i, &sa, nil)
}

// sigtramp is the callback from libc when a signal is received.
// It is called with the C calling convention.
func sigtramp()
func cgoSigtramp()

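// setsigstack ensures that the existing handler for signal i runs on
// the alternate signal stack, reinstalling it with _SA_ONSTACK set
// if necessary.
//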
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	var osa usigactiont
	sigaction(i, nil, &osa)
	handler := *(*uintptr)(unsafe.Pointer(&osa.__sigaction_u))
	if osa.sa_flags&_SA_ONSTACK != 0 {
		return
	}
	var sa usigactiont
	*(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = handler
	sa.sa_mask = osa.sa_mask
	sa.sa_flags = osa.sa_flags | _SA_ONSTACK
	sigaction(i, &sa, nil)
}

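// getsig returns the current handler for signal i.
//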
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa usigactiont
	sigaction(i, nil, &sa)
	return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
}

// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
}

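// sigaddset and sigdelset set and clear signal i in mask. Darwin signal
// masks use bit i-1 for signal i, since there is no signal 0.
//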
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	*mask |= 1 << (uint32(i) - 1)
}

func sigdelset(mask *sigset, i int) {
	*mask &^= 1 << (uint32(i) - 1)
}

func setProcessCPUProfiler(hz int32) {
	setProcessCPUProfilerTimer(hz)
}

func setThreadCPUProfiler(hz int32) {
	setThreadCPUProfilerHz(hz)
}

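// validSIGPROF reports whether a SIGPROF delivered to mp should be
// handled; on Darwin every SIGPROF is considered valid.
//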
//go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool {
	return true
}

//go:linkname executablePath os.executablePath
var executablePath string

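// sysargs recovers the executable path from the string area that the
// kernel places on the stack after the environment strings.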
func sysargs(argc int32, argv **byte) {
	// Skip over argv and envv; the first string after them is the path.
	n := argc + 1
	for argv_index(argv, n) != nil {
		n++
	}
	executablePath = gostringnocopy(argv_index(argv, n+1))

	// Strip the "executable_path=" prefix if present; it is added after OS X 10.11.
	const prefix = "executable_path="
	if len(executablePath) > len(prefix) && executablePath[:len(prefix)] == prefix {
		executablePath = executablePath[len(prefix):]
	}
}

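// signalM sends sig to the OS thread that mp is running on.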
func signalM(mp *m, sig int) {
	pthread_kill(pthread(mp.procid), uint32(sig))
}

// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
// number.
const sigPerThreadSyscall = 1 << 31

//go:nosplit
func runPerThreadSyscall() {
	throw("runPerThreadSyscall only valid on linux")
}