diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 5f92e7480e..483ea0aee5 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -468,8 +468,7 @@ func mallocinit() {
 
     // Initialize the heap.
     mheap_.init()
-    _g_ := getg()
-    _g_.m.mcache = allocmcache()
+    mcache0 = allocmcache()
 
     // Create initial arena growth hints.
     if sys.PtrSize == 8 {
@@ -953,7 +952,19 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
     shouldhelpgc := false
     dataSize := size
-    c := gomcache()
+    var c *mcache
+    if mp.p != 0 {
+        c = mp.p.ptr().mcache
+    } else {
+        // We will be called without a P while bootstrapping,
+        // in which case we use mcache0, which is set in mallocinit.
+        // mcache0 is cleared when bootstrapping is complete,
+        // by procresize.
+        c = mcache0
+        if c == nil {
+            throw("malloc called with no P")
+        }
+    }
     var x unsafe.Pointer
     noscan := typ == nil || typ.ptrdata == 0
     if size <= maxSmallSize {
@@ -1193,7 +1204,7 @@ func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
 }
 
 func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
-    mp.mcache.next_sample = nextSample()
+    mp.p.ptr().mcache.next_sample = nextSample()
     mProf_Malloc(x, size)
 }
 
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index b95c7f13a4..fd9bf8f864 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -226,7 +226,7 @@ func (s *mspan) sweep(preserve bool) bool {
     size := s.elemsize
     res := false
 
-    c := _g_.m.mcache
+    c := _g_.m.p.ptr().mcache
     freeToHeap := false
 
     // The allocBits indicate which unmarked objects don't need to be
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 5427d8839d..86ecf3377d 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1141,10 +1141,21 @@ func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysS
         // which may only be done with the heap locked.
 
         // Transfer stats from mcache to global.
-        memstats.heap_scan += uint64(gp.m.mcache.local_scan)
-        gp.m.mcache.local_scan = 0
-        memstats.tinyallocs += uint64(gp.m.mcache.local_tinyallocs)
-        gp.m.mcache.local_tinyallocs = 0
+        var c *mcache
+        if gp.m.p != 0 {
+            c = gp.m.p.ptr().mcache
+        } else {
+            // This case occurs while bootstrapping.
+            // See the similar code in mallocgc.
+            c = mcache0
+            if c == nil {
+                throw("mheap.allocSpan called with no P")
+            }
+        }
+        memstats.heap_scan += uint64(c.local_scan)
+        c.local_scan = 0
+        memstats.tinyallocs += uint64(c.local_tinyallocs)
+        c.local_tinyallocs = 0
 
         // Do some additional accounting if it's a large allocation.
         if spanclass.sizeclass() == 0 {
@@ -1342,12 +1353,12 @@ func (h *mheap) grow(npage uintptr) bool {
 // Free the span back into the heap.
 func (h *mheap) freeSpan(s *mspan) {
     systemstack(func() {
-        mp := getg().m
+        c := getg().m.p.ptr().mcache
         lock(&h.lock)
-        memstats.heap_scan += uint64(mp.mcache.local_scan)
-        mp.mcache.local_scan = 0
-        memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
-        mp.mcache.local_tinyallocs = 0
+        memstats.heap_scan += uint64(c.local_scan)
+        c.local_scan = 0
+        memstats.tinyallocs += uint64(c.local_tinyallocs)
+        c.local_tinyallocs = 0
         if msanenabled {
             // Tell msan that this entire span is no longer in use.
             base := unsafe.Pointer(s.base())
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 2a91e82185..2174564637 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -82,6 +82,7 @@ var modinfo string
 var (
     m0           m
     g0           g
+    mcache0      *mcache
     raceprocctx0 uintptr
 )
 
@@ -2957,7 +2958,6 @@ func reentersyscall(pc, sp uintptr) {
 
     _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
     _g_.sysblocktraced = true
-    _g_.m.mcache = nil
    pp := _g_.m.p.ptr()
     pp.m = 0
     _g_.m.oldp.set(pp)
@@ -3083,9 +3083,6 @@ func exitsyscall() {
     oldp := _g_.m.oldp.ptr()
     _g_.m.oldp = 0
     if exitsyscallfast(oldp) {
-        if _g_.m.mcache == nil {
-            throw("lost mcache")
-        }
         if trace.enabled {
             if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
                 systemstack(traceGoStart)
@@ -3136,10 +3133,6 @@ func exitsyscall() {
     // Call the scheduler.
     mcall(exitsyscall0)
 
-    if _g_.m.mcache == nil {
-        throw("lost mcache")
-    }
-
     // Scheduler returned, so we're allowed to run now.
     // Delete the syscallsp information that we left for
     // the garbage collector during the system call.
@@ -4033,10 +4026,12 @@ func (pp *p) init(id int32) {
     pp.wbBuf.reset()
     if pp.mcache == nil {
         if id == 0 {
-            if getg().m.mcache == nil {
+            if mcache0 == nil {
                 throw("missing mcache?")
             }
-            pp.mcache = getg().m.mcache // bootstrap
+            // Use the bootstrap mcache0. Only one P will get
+            // mcache0: the one with ID 0.
+            pp.mcache = mcache0
         } else {
             pp.mcache = allocmcache()
         }
@@ -4216,7 +4211,6 @@ func procresize(nprocs int32) *p {
             _g_.m.p.ptr().m = 0
         }
         _g_.m.p = 0
-        _g_.m.mcache = nil
         p := allp[0]
         p.m = 0
         p.status = _Pidle
@@ -4226,6 +4220,9 @@ func procresize(nprocs int32) *p {
         }
     }
 
+    // g.m.p is now set, so we no longer need mcache0 for bootstrapping.
+    mcache0 = nil
+
     // release resources from unused P's
     for i := nprocs; i < old; i++ {
         p := allp[i]
@@ -4291,7 +4288,7 @@ func acquirep(_p_ *p) {
 
 func wirep(_p_ *p) {
     _g_ := getg()
 
-    if _g_.m.p != 0 || _g_.m.mcache != nil {
+    if _g_.m.p != 0 {
         throw("wirep: already in go")
     }
     if _p_.m != 0 || _p_.status != _Pidle {
@@ -4302,7 +4299,6 @@ func wirep(_p_ *p) {
         print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
         throw("wirep: invalid p state")
     }
-    _g_.m.mcache = _p_.mcache
     _g_.m.p.set(_p_)
     _p_.m.set(_g_.m)
     _p_.status = _Prunning
@@ -4312,19 +4308,18 @@ func wirep(_p_ *p) {
 func releasep() *p {
     _g_ := getg()
 
-    if _g_.m.p == 0 || _g_.m.mcache == nil {
+    if _g_.m.p == 0 {
         throw("releasep: invalid arg")
     }
     _p_ := _g_.m.p.ptr()
-    if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
-        print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
+    if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
+        print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
         throw("releasep: invalid p state")
     }
     if trace.enabled {
         traceProcStop(_g_.m.p.ptr())
     }
     _g_.m.p = 0
-    _g_.m.mcache = nil
     _p_.m = 0
     _p_.status = _Pidle
     return _p_
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 88a99fc08b..c65a534ef6 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -459,11 +459,6 @@ func releasem(mp *m) {
     }
 }
 
-//go:nosplit
-func gomcache() *mcache {
-    return getg().m.mcache
-}
-
 //go:linkname reflect_typelinks reflect.typelinks
 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
     modules := activeModules()
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 99eb19eb0c..9e3ccb2e40 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -510,7 +510,6 @@ type m struct {
     park          note
     alllink       *m // on allm
     schedlink     muintptr
-    mcache        *mcache
     lockedg       guintptr
     createstack   [32]uintptr // stack that created this thread.
     lockedExt     uint32      // tracking for external LockOSThread
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index ebbe3e013d..e72a75cdef 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -357,16 +357,16 @@ func stackalloc(n uint32) stack {
             n2 >>= 1
         }
         var x gclinkptr
-        c := thisg.m.mcache
-        if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" {
-            // c == nil can happen in the guts of exitsyscall or
-            // procresize. Just get a stack from the global pool.
+        if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
+            // thisg.m.p == 0 can happen in the guts of exitsyscall
+            // or procresize. Just get a stack from the global pool.
             // Also don't touch stackcache during gc
             // as it's flushed concurrently.
             lock(&stackpool[order].item.mu)
             x = stackpoolalloc(order)
             unlock(&stackpool[order].item.mu)
         } else {
+            c := thisg.m.p.ptr().mcache
             x = c.stackcache[order].list
             if x.ptr() == nil {
                 stackcacherefill(c, order)
@@ -452,12 +452,12 @@ func stackfree(stk stack) {
             n2 >>= 1
         }
         x := gclinkptr(v)
-        c := gp.m.mcache
-        if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" {
+        if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
             lock(&stackpool[order].item.mu)
             stackpoolfree(x, order)
             unlock(&stackpool[order].item.mu)
         } else {
+            c := gp.m.p.ptr().mcache
             if c.stackcache[order].size >= _StackCacheSize {
                 stackcacherelease(c, order)
             }
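
For illustration (not part of the patch), here is a minimal standalone Go sketch of the access pattern this change introduces. The types, fields, and the cacheOf helper below are simplified stand-ins that only mirror the runtime's names: the mcache now hangs off the P, and a package-level mcache0 fills in during bootstrap, before any P has been wired to the M (see the branches added to mallocgc and mheap.allocSpan above).

package main

import "fmt"

// Simplified stand-ins for the runtime's mcache, p, and m types.
type mcache struct{ localScan uintptr }

type p struct {
    id     int32
    mcache *mcache
}

type m struct {
    p *p // nil models m.p == 0: no P wired, as during bootstrap
}

// mcache0 plays the role of the bootstrap cache allocated in mallocinit
// and dropped by procresize once P 0 has adopted it.
var mcache0 *mcache

// cacheOf mirrors the lookup added to mallocgc and mheap.allocSpan:
// prefer the wired P's mcache, fall back to mcache0 while bootstrapping.
func cacheOf(mp *m) *mcache {
    if mp.p != nil {
        return mp.p.mcache
    }
    if mcache0 == nil {
        panic("malloc called with no P")
    }
    return mcache0
}

func main() {
    mcache0 = &mcache{} // mallocinit: bootstrap cache exists before any P

    m0 := &m{}
    fmt.Println(cacheOf(m0) == mcache0) // true: bootstrap path

    p0 := &p{id: 0, mcache: mcache0} // p.init(0): P 0 adopts mcache0
    mcache0 = nil                    // procresize: bootstrapping is complete

    m0.p = p0                             // wirep: the M reaches the cache via its P
    fmt.Println(cacheOf(m0) == p0.mcache) // true: normal path
}

One upshot of keeping the mcache only on the P is visible in the proc.go deletions above: wirep, releasep, and the syscall entry/exit paths no longer need to copy, clear, or re-validate m.mcache, since the pointer travels with the P itself.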