
runtime: change lockedg/lockedm to guintptr/muintptr

This change has no real effect in itself. It prepares for a follow-up
change that will call lockOSThread during a cgo callback when there is
no p assigned, and therefore when lockOSThread cannot use a write
barrier.

Change-Id: Ia122d41acf54191864bcb68f393f2ed3b2f87abc
Reviewed-on: https://go-review.googlesource.com/63630
Run-TryBot: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Crawshaw <crawshaw@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Ian Lance Taylor, 2017-09-13 10:14:02 -07:00
commit 165c15afa3, parent 27e80f7c4d
5 changed files with 25 additions and 25 deletions
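
The diff relies on the runtime's pointer-as-integer wrapper types, which is why nil comparisons become comparisons against 0 and direct pointer assignments become set()/ptr() calls. As a rough sketch (simplified from the declarations in runtime/runtime2.go; the real ones carry doc comments, //go:nosplit directives, and extra helpers), the two types look like this:

type guintptr uintptr

func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

type muintptr uintptr

func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

Because the fields are plain uintptrs as far as the compiler is concerned, stores into them never emit a write barrier; the g and m structures stay reachable through other references (allgs, allm), so hiding these links from the garbage collector is safe.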

@@ -429,7 +429,7 @@ func badctxt() {
 func lockedOSThread() bool {
 	gp := getg()
-	return gp.lockedm != nil && gp.m.lockedg != nil
+	return gp.lockedm != 0 && gp.m.lockedg != 0
 }
 var (
@@ -1504,8 +1504,8 @@ func oneNewExtraM() {
 	gp.m = mp
 	mp.curg = gp
 	mp.locked = _LockInternal
-	mp.lockedg = gp
-	gp.lockedm = mp
+	mp.lockedg.set(gp)
+	gp.lockedm.set(mp)
 	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
 	if raceenabled {
 		gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
@@ -1819,7 +1819,7 @@ func wakep() {
 func stoplockedm() {
 	_g_ := getg()
-	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
+	if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
 		throw("stoplockedm: inconsistent locking")
 	}
 	if _g_.m.p != 0 {
@@ -1831,7 +1831,7 @@ func stoplockedm() {
 	// Wait until another thread schedules lockedg again.
 	notesleep(&_g_.m.park)
 	noteclear(&_g_.m.park)
-	status := readgstatus(_g_.m.lockedg)
+	status := readgstatus(_g_.m.lockedg.ptr())
 	if status&^_Gscan != _Grunnable {
 		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
 		dumpgstatus(_g_)
@@ -1847,7 +1847,7 @@ func stoplockedm() {
 func startlockedm(gp *g) {
 	_g_ := getg()
-	mp := gp.lockedm
+	mp := gp.lockedm.ptr()
 	if mp == _g_.m {
 		throw("startlockedm: locked to me")
 	}
@@ -2214,9 +2214,9 @@ func schedule() {
 		throw("schedule: holding locks")
 	}
-	if _g_.m.lockedg != nil {
+	if _g_.m.lockedg != 0 {
 		stoplockedm()
-		execute(_g_.m.lockedg, false) // Never returns.
+		execute(_g_.m.lockedg.ptr(), false) // Never returns.
 	}
 top:
@@ -2267,7 +2267,7 @@ top:
 		resetspinning()
 	}
-	if gp.lockedm != nil {
+	if gp.lockedm != 0 {
 		// Hands off own p to the locked m,
 		// then blocks waiting for a new p.
 		startlockedm(gp)
@@ -2386,8 +2386,8 @@ func goexit0(gp *g) {
 		atomic.Xadd(&sched.ngsys, -1)
 	}
 	gp.m = nil
-	gp.lockedm = nil
-	_g_.m.lockedg = nil
+	gp.lockedm = 0
+	_g_.m.lockedg = 0
 	gp.paniconfault = false
 	gp._defer = nil // should be true already but just in case.
 	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
@@ -2815,7 +2815,7 @@ func exitsyscall0(gp *g) {
 		acquirep(_p_)
 		execute(gp, false) // Never returns.
 	}
-	if _g_.m.lockedg != nil {
+	if _g_.m.lockedg != 0 {
 		// Wait until another thread schedules gp and so m again.
 		stoplockedm()
 		execute(gp, false) // Never returns.
@@ -3165,8 +3165,8 @@ func Breakpoint() {
 //go:nosplit
 func dolockOSThread() {
 	_g_ := getg()
-	_g_.m.lockedg = _g_
-	_g_.lockedm = _g_.m
+	_g_.m.lockedg.set(_g_)
+	_g_.lockedm.set(_g_.m)
 }
 //go:nosplit
@@ -3194,8 +3194,8 @@ func dounlockOSThread() {
 	if _g_.m.locked != 0 {
 		return
 	}
-	_g_.m.lockedg = nil
-	_g_.lockedm = nil
+	_g_.m.lockedg = 0
+	_g_.lockedm = 0
 }
 //go:nosplit
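
dolockOSThread and dounlockOSThread above are the runtime-internal halves of the public runtime.LockOSThread/UnlockOSThread API. For orientation only (ordinary user code, untouched by this CL), the pattern they serve looks like:

package main

import "runtime"

func main() {
	// Pin the calling goroutine to its OS thread, e.g. because C code or
	// an OS API keeps per-thread state.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// ... work that must stay on this thread ...
}
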
@@ -4102,7 +4102,7 @@ func schedtrace(detailed bool) {
 	for mp := allm; mp != nil; mp = mp.alllink {
 		_p_ := mp.p.ptr()
 		gp := mp.curg
-		lockedg := mp.lockedg
+		lockedg := mp.lockedg.ptr()
 		id1 := int32(-1)
 		if _p_ != nil {
 			id1 = _p_.id
@@ -4122,7 +4122,7 @@ func schedtrace(detailed bool) {
 	for gi := 0; gi < len(allgs); gi++ {
 		gp := allgs[gi]
 		mp := gp.m
-		lockedm := gp.lockedm
+		lockedm := gp.lockedm.ptr()
 		id1 := int32(-1)
 		if mp != nil {
 			id1 = mp.id

@@ -357,7 +357,7 @@ type g struct {
 	sysexitticks int64 // cputicks when syscall has returned (for tracing)
 	traceseq uint64 // trace event sequencer
 	tracelastp puintptr // last P emitted an event for this goroutine
-	lockedm *m
+	lockedm muintptr
 	sig uint32
 	writebuf []byte
 	sigcode0 uintptr
@@ -423,7 +423,7 @@ type m struct {
 	alllink *m // on allm
 	schedlink muintptr
 	mcache *mcache
-	lockedg *g
+	lockedg guintptr
 	createstack [32]uintptr // stack that created this thread.
 	freglo [16]uint32 // d[i] lsb and f[i]
 	freghi [16]uint32 // d[i] msb and f[i+16]
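
These two field type changes are the heart of the CL: assigning a *g or *m into a heap-visible pointer field goes through the compiler's write barrier, which (as the commit message notes) cannot be used when the M has no P, while assigning into a guintptr/muintptr field is a plain uintptr store that the garbage collector does not treat as a pointer. A standalone toy sketch of the same pattern, with made-up names (worker, workerptr) used purely for illustration:

package main

import "unsafe"

// worker stands in for a runtime structure such as g or m.
type worker struct{ id int }

// workerptr mirrors the guintptr/muintptr idea: a pointer stored as a
// uintptr. Stores to such a field are plain writes with no write barrier,
// so the referent must be kept alive by some other reference.
type workerptr uintptr

func (wp workerptr) ptr() *worker { return (*worker)(unsafe.Pointer(wp)) }

func (wp *workerptr) set(w *worker) { *wp = workerptr(unsafe.Pointer(w)) }

func main() {
	w := &worker{id: 7}
	var locked workerptr
	locked.set(w)            // plain uintptr store, no write barrier
	println(locked.ptr().id) // prints 7; w stays alive via the local variable
}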

@@ -88,9 +88,9 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}
 	print("PC=", hex(c.sigpc()), " m=", _g_.m.id, " sigcode=", c.sigcode(), "\n")
-	if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
+	if _g_.m.lockedg != 0 && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
 		print("signal arrived during cgo execution\n")
-		gp = _g_.m.lockedg
+		gp = _g_.m.lockedg.ptr()
 	}
 	print("\n")

@@ -126,11 +126,11 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
 	print("Exception ", hex(info.exceptioncode), " ", hex(info.exceptioninformation[0]), " ", hex(info.exceptioninformation[1]), " ", hex(r.ip()), "\n")
 	print("PC=", hex(r.ip()), "\n")
-	if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
+	if _g_.m.lockedg != 0 && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
 		if iscgo {
 			print("signal arrived during external code execution\n")
 		}
-		gp = _g_.m.lockedg
+		gp = _g_.m.lockedg.ptr()
 	}
 	print("\n")

@@ -795,7 +795,7 @@ func goroutineheader(gp *g) {
 	if waitfor >= 1 {
 		print(", ", waitfor, " minutes")
 	}
-	if gp.lockedm != nil {
+	if gp.lockedm != 0 {
 		print(", locked to thread")
 	}
 	print("]:\n")