2014-11-11 15:08:54 -07:00
|
|
|
// Copyright 2009 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
2015-11-11 10:39:30 -07:00
|
|
|
import (
|
|
|
|
"runtime/internal/sys"
|
|
|
|
"unsafe"
|
|
|
|
)
|
2014-11-11 15:08:54 -07:00
|
|
|
|
2016-04-05 20:51:55 -06:00
|
|
|
// mOS holds Linux-specific state embedded in the runtime's m (OS thread)
// structure. It is empty: on Linux the runtime blocks and wakes threads
// directly with futexes (see futexsleep/futexwakeup below), so no extra
// per-thread OS state is needed.
type mOS struct{}
|
|
|
|
|
|
|
|
// futex invokes the raw Linux futex(2) system call. It has no Go body;
// the implementation is in per-architecture assembly.
//
//go:noescape
func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32
|
|
|
|
|
2014-11-11 15:08:54 -07:00
|
|
|
// Linux futex.
//
//	futexsleep(uint32 *addr, uint32 val)
//	futexwakeup(uint32 *addr)
//
// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
// Futexwakeup wakes up threads sleeping on addr.
// Futexsleep is allowed to wake up spuriously.

const (
	// Futex operation codes passed to futex(2); see <linux/futex.h>.
	_FUTEX_WAIT = 0
	_FUTEX_WAKE = 1
)
|
|
|
|
|
|
|
|
// Atomically,
//
//	if(*addr == val) sleep
//
// Might be woken up spuriously; that's allowed.
// Don't sleep longer than ns; ns < 0 means forever.
//
//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
	var ts timespec

	// Some Linux kernels have a bug where futex of
	// FUTEX_WAIT returns an internal error code
	// as an errno. Libpthread ignores the return value
	// here, and so can we: as it says a few lines up,
	// spurious wakeups are allowed.
	if ns < 0 {
		// Wait with no timeout.
		futex(unsafe.Pointer(addr), _FUTEX_WAIT, val, nil, nil, 0)
		return
	}

	// It's difficult to live within the no-split stack limits here.
	// On ARM and 386, a 64-bit divide invokes a general software routine
	// that needs more stack than we can afford. So we use timediv instead.
	// But on real 64-bit systems, where words are larger but the stack limit
	// is not, even timediv is too heavy, and we really need to use just an
	// ordinary machine instruction.
	if sys.PtrSize == 8 {
		ts.set_sec(ns / 1000000000)
		ts.set_nsec(int32(ns % 1000000000))
	} else {
		// timediv returns the quotient and stores the remainder
		// (the nanoseconds) through the third argument, which here
		// aliases ts.tv_nsec.
		ts.tv_nsec = 0
		ts.set_sec(int64(timediv(ns, 1000000000, (*int32)(unsafe.Pointer(&ts.tv_nsec)))))
	}
	futex(unsafe.Pointer(addr), _FUTEX_WAIT, val, unsafe.Pointer(&ts), nil, 0)
}
|
|
|
|
|
|
|
|
// If any procs are sleeping on addr, wake up at most cnt.
//
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
	ret := futex(unsafe.Pointer(addr), _FUTEX_WAKE, cnt, nil, nil, 0)
	if ret >= 0 {
		return
	}

	// I don't know that futex wakeup can return
	// EAGAIN or EINTR, but if it does, it would be
	// safe to loop and call futex again.
	systemstack(func() {
		print("futexwakeup addr=", addr, " returned ", ret, "\n")
	})

	// Deliberately crash by storing to an invalid address. A failing
	// futex wakeup means the runtime's thread parking is broken, so
	// fault loudly rather than continue.
	*(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
}
|
|
|
|
|
|
|
|
func getproccount() int32 {
|
2015-07-22 14:12:26 -06:00
|
|
|
// This buffer is huge (8 kB) but we are on the system stack
|
2015-09-15 10:22:46 -06:00
|
|
|
// and there should be plenty of space (64 kB).
|
2015-07-22 14:12:26 -06:00
|
|
|
// Also this is a leaf, so we're not holding up the memory for long.
|
|
|
|
// See golang.org/issue/11823.
|
|
|
|
// The suggested behavior here is to keep trying with ever-larger
|
|
|
|
// buffers, but we don't have a dynamic memory allocator at the
|
|
|
|
// moment, so that's a bit tricky and seems like overkill.
|
2015-09-15 10:22:46 -06:00
|
|
|
const maxCPUs = 64 * 1024
|
2015-11-11 10:39:30 -07:00
|
|
|
var buf [maxCPUs / (sys.PtrSize * 8)]uintptr
|
2014-11-11 15:08:54 -07:00
|
|
|
r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
|
|
|
|
n := int32(0)
|
2015-11-11 10:39:30 -07:00
|
|
|
for _, v := range buf[:r/sys.PtrSize] {
|
2015-07-22 14:12:26 -06:00
|
|
|
for v != 0 {
|
2014-11-11 15:08:54 -07:00
|
|
|
n += int32(v & 1)
|
|
|
|
v >>= 1
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if n == 0 {
|
|
|
|
n = 1
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
// Clone, the Linux rfork.
// Flag values for clone(2); see <linux/sched.h>.
const (
	_CLONE_VM             = 0x100
	_CLONE_FS             = 0x200
	_CLONE_FILES          = 0x400
	_CLONE_SIGHAND        = 0x800
	_CLONE_PTRACE         = 0x2000
	_CLONE_VFORK          = 0x4000
	_CLONE_PARENT         = 0x8000
	_CLONE_THREAD         = 0x10000
	_CLONE_NEWNS          = 0x20000
	_CLONE_SYSVSEM        = 0x40000
	_CLONE_SETTLS         = 0x80000
	_CLONE_PARENT_SETTID  = 0x100000
	_CLONE_CHILD_CLEARTID = 0x200000
	_CLONE_UNTRACED       = 0x800000
	_CLONE_CHILD_SETTID   = 0x1000000
	_CLONE_STOPPED        = 0x2000000
	_CLONE_NEWUTS         = 0x4000000
	_CLONE_NEWIPC         = 0x8000000

	// cloneFlags is the flag set used by newosproc/newosproc0 to create
	// runtime threads that share the address space with the process.
	cloneFlags = _CLONE_VM | /* share memory */
		_CLONE_FS | /* share cwd, etc */
		_CLONE_FILES | /* share fd table */
		_CLONE_SIGHAND | /* share sig handler table */
		_CLONE_THREAD /* revisit - okay for now */
)
|
|
|
|
|
2016-04-05 20:51:55 -06:00
|
|
|
// clone invokes the Linux clone(2) system call to start a new thread
// running fn on stack stk; implemented in per-architecture assembly.
//
//go:noescape
func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32
|
2016-04-05 20:51:55 -06:00
|
|
|
|
2015-03-29 08:20:54 -06:00
|
|
|
// newosproc creates a new OS thread for mp, with stack pointer stk,
// starting execution at mstart.
//
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m, stk unsafe.Pointer) {
	/*
	 * note: strace gets confused if we use CLONE_PTRACE here.
	 */
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", funcPC(clone), " id=", mp.id, " ostk=", &mp, "\n")
	}

	// Disable signals during clone, so that the new thread starts
	// with signals disabled. It will enable them in minit.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(funcPC(mstart)))
	sigprocmask(_SIG_SETMASK, &oset, nil)

	if ret < 0 {
		// clone returns a negative errno on failure.
		print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -ret, ")\n")
		if ret == -_EAGAIN {
			println("runtime: may need to increase max user processes (ulimit -u)")
		}
		throw("newosproc")
	}
}
|
|
|
|
|
2015-04-17 18:27:07 -06:00
|
|
|
// Version of newosproc that doesn't require a valid G.
// It allocates the thread stack itself and reports errors by writing
// raw byte messages to fd 2, since print/throw are unavailable here.
//
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
	stack := sysAlloc(stacksize, &memstats.stacks_sys)
	if stack == nil {
		write(2, unsafe.Pointer(&failallocatestack[0]), int32(len(failallocatestack)))
		exit(1)
	}
	// Pass the top of the allocation: the stack grows down.
	ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn)
	if ret < 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}
|
|
|
|
|
|
|
|
// Error messages emitted by newosproc0 via the raw write syscall,
// because it runs without a valid G and cannot use print or throw.
var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")

var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
|
|
|
|
|
2016-04-14 10:12:45 -06:00
|
|
|
// ELF auxiliary vector tags read by sysargs; see <elf.h> AT_* values.
const (
	_AT_NULL   = 0  // End of vector
	_AT_PAGESZ = 6  // System physical page size
	_AT_RANDOM = 25 // introduced in 2.6.29
)
|
|
|
|
|
|
|
|
// sysargs walks past the argv and envp arrays the kernel placed on the
// initial stack to reach the ELF auxiliary vector, and records the
// values the runtime cares about (startup randomness, page size).
// Every tag/value pair is also handed to the per-architecture archauxv.
func sysargs(argc int32, argv **byte) {
	n := argc + 1

	// skip over argv, envp to get to auxv
	for argv_index(argv, n) != nil {
		n++
	}

	// skip NULL separator
	n++

	// now argv+n is auxv
	// (The [1 << 28] bound is arbitrary; it just gives a large indexable
	// view over the auxv memory.)
	auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
	for i := 0; auxv[i] != _AT_NULL; i += 2 {
		tag, val := auxv[i], auxv[i+1]
		switch tag {
		case _AT_RANDOM:
			// The kernel provides a pointer to 16-bytes
			// worth of random data.
			startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:]

		case _AT_PAGESZ:
			physPageSize = val
		}

		archauxv(tag, val)
	}
}
|
|
|
|
|
2014-11-11 15:08:54 -07:00
|
|
|
// osinit performs OS-specific initialization during runtime startup.
// On Linux this just records the number of usable CPUs.
func osinit() {
	ncpu = getproccount()
}
|
|
|
|
|
2015-01-08 16:30:22 -07:00
|
|
|
// urandom_dev is the NUL-terminated path passed to the raw open syscall
// by getRandomData when no startup randomness is available.
var urandom_dev = []byte("/dev/urandom\x00")
|
2014-11-11 15:08:54 -07:00
|
|
|
|
2014-12-09 15:40:40 -07:00
|
|
|
// getRandomData fills r with random bytes. It prefers the 16 bytes of
// kernel-provided startup randomness (AT_RANDOM, captured in sysargs)
// and otherwise reads from /dev/urandom; in both cases whatever was
// obtained is stretched to fill r via extendRandom.
func getRandomData(r []byte) {
	if startupRandomData != nil {
		n := copy(r, startupRandomData)
		extendRandom(r, n)
		return
	}
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}
|
|
|
|
|
|
|
|
// goenvs copies the process environment into the runtime; Linux uses
// the generic Unix implementation.
func goenvs() {
	goenvs_unix()
}
|
|
|
|
|
2015-12-26 10:51:59 -07:00
|
|
|
// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
	initsig(true)
}
|
|
|
|
|
2014-11-11 15:08:54 -07:00
|
|
|
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	// Allocate the g used for handling signals on this m.
	mp.gsignal = malg(32 * 1024) // Linux wants >= 2K
	mp.gsignal.m = mp
}
|
|
|
|
|
2015-06-22 10:32:05 -06:00
|
|
|
// gettid returns the OS thread ID of the caller (Linux gettid(2));
// implemented in assembly.
func gettid() uint32
|
|
|
|
|
2014-11-11 15:08:54 -07:00
|
|
|
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	minitSignals()

	// for debuggers, in case cgo created the thread
	getg().m.procid = uint64(gettid())
}
|
|
|
|
|
|
|
|
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
	unminitSignals()
}
|
|
|
|
|
|
|
|
// memlimit returns an estimate of the address-space limit available to
// the heap, or 0 for "no limit". It is currently a stub that always
// reports no limit; the commented-out C code below documents the
// intended computation for a future Go conversion.
func memlimit() uintptr {
	/*
		TODO: Convert to Go when something actually uses the result.

		Rlimit rl;
		extern byte runtime·text[], runtime·end[];
		uintptr used;

		if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
			return 0;
		if(rl.rlim_cur >= 0x7fffffff)
			return 0;

		// Estimate our VM footprint excluding the heap.
		// Not an exact science: use size of binary plus
		// some room for thread stacks.
		used = runtime·end - runtime·text + (64<<20);
		if(used >= rl.rlim_cur)
			return 0;

		// If there's not at least 16 MB left, we're probably
		// not going to be able to do much. Treat as no limit.
		rl.rlim_cur -= used;
		if(rl.rlim_cur < (16<<20))
			return 0;

		return rl.rlim_cur - used;
	*/

	return 0
}
|
|
|
|
|
|
|
|
//#ifdef GOARCH_386
//#define sa_handler k_sa_handler
//#endif

// sigreturn, sigtramp, and cgoSigtramp are signal entry points
// implemented in per-architecture assembly. sigtramp (or cgoSigtramp
// when cgo is in use) is installed as the kernel-visible handler by
// setsig and dispatches to the Go sighandler.
func sigreturn()
func sigtramp(sig uint32, info *siginfo, ctx unsafe.Pointer)
func cgoSigtramp()
|
2014-11-11 15:08:54 -07:00
|
|
|
|
2016-04-05 20:51:55 -06:00
|
|
|
// rt_sigaction invokes the Linux rt_sigaction system call; size is the
// size in bytes of the sa_mask field. Implemented in assembly.
//
//go:noescape
func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
|
|
|
|
|
|
|
|
// sigaltstack, setitimer, and rtsigprocmask are thin system call
// wrappers implemented in assembly.

//go:noescape
func sigaltstack(new, old *stackt)

//go:noescape
func setitimer(mode int32, new, old *itimerval)

//go:noescape
func rtsigprocmask(how int32, new, old *sigset, size int32)
|
|
|
|
|
|
|
|
// sigprocmask wraps rtsigprocmask, supplying the sigset size the
// kernel expects.
//
//go:nosplit
//go:nowritebarrierrec
func sigprocmask(how int32, new, old *sigset) {
	rtsigprocmask(how, new, old, int32(unsafe.Sizeof(*new)))
}
|
2016-04-05 20:51:55 -06:00
|
|
|
|
|
|
|
// getrlimit, raise, and raiseproc are system call wrappers implemented
// in assembly.

//go:noescape
func getrlimit(kind int32, limit unsafe.Pointer) int32
func raise(sig int32)
func raiseproc(sig int32)
|
|
|
|
|
|
|
|
// sched_getaffinity and osyield are system call wrappers implemented
// in assembly.

//go:noescape
func sched_getaffinity(pid, len uintptr, buf *uintptr) int32
func osyield()
|
|
|
|
|
2015-12-26 10:51:59 -07:00
|
|
|
// setsig installs fn as the handler for signal i. When fn is the Go
// sighandler, the kernel-visible handler is actually the assembly
// trampoline (sigtramp, or cgoSigtramp under cgo). If restart is true
// the handler is installed with SA_RESTART so interrupted system calls
// are restarted.
//
//go:nosplit
//go:nowritebarrierrec
func setsig(i int32, fn uintptr, restart bool) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTORER
	if restart {
		sa.sa_flags |= _SA_RESTART
	}
	// Block all signals while the handler runs.
	sigfillset(&sa.sa_mask)
	// Although Linux manpage says "sa_restorer element is obsolete and
	// should not be used". x86_64 kernel requires it. Only use it on
	// x86.
	if GOARCH == "386" || GOARCH == "amd64" {
		sa.sa_restorer = funcPC(sigreturn)
	}
	if fn == funcPC(sighandler) {
		if iscgo {
			fn = funcPC(cgoSigtramp)
		} else {
			fn = funcPC(sigtramp)
		}
	}
	sa.sa_handler = fn
	rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask))
}
|
|
|
|
|
2015-12-26 10:51:59 -07:00
|
|
|
// setsigstack rewrites the existing handler for signal i to add
// _SA_ONSTACK, so it is delivered on the alternate signal stack.
// It leaves default, ignored, zero, or already-on-stack handlers alone.
//
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i int32) {
	// Read the currently installed action.
	var sa sigactiont
	if rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask)) != 0 {
		throw("rt_sigaction failure")
	}
	if sa.sa_handler == 0 || sa.sa_handler == _SIG_DFL || sa.sa_handler == _SIG_IGN || sa.sa_flags&_SA_ONSTACK != 0 {
		return
	}
	// Reinstall the same handler with the on-stack flag added.
	sa.sa_flags |= _SA_ONSTACK
	if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
		throw("rt_sigaction failure")
	}
}
|
|
|
|
|
2015-12-26 10:51:59 -07:00
|
|
|
// getsig returns the handler currently installed for signal i,
// translating the runtime's assembly trampolines back to sighandler so
// callers see the Go-level handler.
//
//go:nosplit
//go:nowritebarrierrec
func getsig(i int32) uintptr {
	var sa sigactiont
	if rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask)) != 0 {
		throw("rt_sigaction read failure")
	}
	if sa.sa_handler == funcPC(sigtramp) || sa.sa_handler == funcPC(cgoSigtramp) {
		return funcPC(sighandler)
	}
	return sa.sa_handler
}
|
|
|
|
|
2016-09-25 14:38:54 -06:00
|
|
|
// setSignaltstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	s.ss_sp = (*byte)(unsafe.Pointer(sp))
}
|
2016-09-25 22:33:27 -06:00
|
|
|
|
|
|
|
// fixsigcode adjusts the signal code in the context for OSes that
// report misleading codes; on Linux no adjustment is needed, so this
// is a no-op.
func (c *sigctxt) fixsigcode(sig uint32) {
}
|