runtime: update and access scavengeGoal atomically
The first step toward acquiring the heap lock less frequently in the
scavenger.

Change-Id: Idc69fd8602be2c83268c155951230d60e20b42fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/353973
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
Parent: 9b2dd1f771
Commit: fc5e8cd6c9
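The pattern throughout this commit is the same: mheap_.scavengeGoal used to be read and written as a plain uint64 under the heap lock; every access now goes through sync/atomic so that later changes can read the goal without taking the lock at all. A minimal, self-contained sketch of that pattern, under assumed names (pacer, goal, setGoal, overGoal are illustrative, not the runtime's):

package main

import (
	"fmt"
	"sync/atomic"
)

// pacer is a stand-in for the runtime structure; the field plays the role
// of scavengeGoal and must only be accessed atomically.
type pacer struct {
	goal uint64 // accessed atomically
}

// setGoal publishes a new goal. Storing ^uint64(0) means "disabled":
// no retained value can ever exceed the maximum uint64.
func (p *pacer) setGoal(g uint64) {
	atomic.Store64(&p.goal, g)
}

// overGoal reports whether retained memory exceeds the published goal,
// without any lock.
func (p *pacer) overGoal(retained uint64) bool {
	return retained > atomic.Load64(&p.goal)
}

func main() {
	p := &pacer{}
	p.setGoal(^uint64(0))            // disabled
	fmt.Println(p.overGoal(1 << 30)) // false
	p.setGoal(512 << 20)             // 512 MiB goal
	fmt.Println(p.overGoal(1 << 30)) // true
}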
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -125,7 +125,7 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
 	// information about the heap yet) so this is fine, and avoids a fault
 	// or garbage data later.
 	if lastHeapGoal == 0 {
-		mheap_.scavengeGoal = ^uint64(0)
+		atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
 		return
 	}
 	// Compute our scavenging goal.
@@ -157,10 +157,10 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
 	// the background scavenger. We disable the background scavenger if there's
 	// less than one physical page of work to do because it's not worth it.
 	if retainedNow <= retainedGoal || retainedNow-retainedGoal < uint64(physPageSize) {
-		mheap_.scavengeGoal = ^uint64(0)
+		atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
 		return
 	}
-	mheap_.scavengeGoal = retainedGoal
+	atomic.Store64(&mheap_.scavengeGoal, retainedGoal)
 }
 
 // Sleep/wait state of the background scavenger.
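Two details in this hunk deserve a note. Storing ^uint64(0) is the "scavenger disabled" sentinel, and the order of the two comparisons matters: retainedNow-retainedGoal is unsigned, so it wraps around when retainedNow < retainedGoal, which is exactly the case the first comparison filters out. A standalone illustration with made-up values:

package main

import "fmt"

func main() {
	const physPageSize = 4096 // illustrative; the runtime detects this at startup
	var retainedNow, retainedGoal uint64 = 100, 200

	// Unsigned subtraction wraps when the result would be negative:
	fmt.Println(retainedNow - retainedGoal) // 18446744073709551516

	// ...so the <= test must short-circuit before the subtraction is used.
	disable := retainedNow <= retainedGoal ||
		retainedNow-retainedGoal < uint64(physPageSize)
	fmt.Println(disable) // true
}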
@@ -299,7 +299,7 @@ func bgscavenge(c chan int) {
 		lock(&mheap_.lock)
 
 		// If background scavenging is disabled or if there's no work to do just park.
-		retained, goal := heapRetained(), mheap_.scavengeGoal
+		retained, goal := heapRetained(), atomic.Load64(&mheap_.scavengeGoal)
 		if retained <= goal {
 			unlock(&mheap_.lock)
 			return
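This read still happens with mheap_.lock held; the point of switching it to atomic.Load64 anyway is consistency. Once any goroutine accesses the field without the lock, every access must be atomic, or the mixed plain/atomic accesses are a data race. A sketch of the same shape under assumed names (scav and shouldPark are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type scav struct {
	mu   sync.Mutex
	goal uint64 // accessed atomically, even while mu is held
}

// shouldPark mirrors the check above: the goal is loaded atomically while
// holding the lock, because other code may load it with no lock at all.
func (s *scav) shouldPark(retained uint64) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return retained <= atomic.Load64(&s.goal)
}

func main() {
	s := &scav{}
	atomic.Store64(&s.goal, ^uint64(0)) // disabled
	fmt.Println(s.shouldPark(64 << 20)) // true: nothing to do, park
}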
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -111,6 +111,8 @@ type mheap struct {
 	// scavengeGoal is the amount of total retained heap memory (measured by
 	// heapRetained) that the runtime will try to maintain by returning memory
 	// to the OS.
+	//
+	// Accessed atomically.
 	scavengeGoal uint64
 
 	// Page reclaimer state
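"Accessed atomically." is the runtime's usual documentation convention for fields like this. One caveat that comes with 64-bit atomics, per the sync/atomic documentation: on 32-bit platforms the operand must be 8-byte aligned, so the field's placement inside the struct matters. An illustrative layout (stats is a made-up type):

package main

import "sync/atomic"

type stats struct {
	// 64-bit fields accessed atomically are conventionally placed first,
	// since the first word of an allocated struct is guaranteed to be
	// 64-bit aligned even on 32-bit platforms.
	goal uint64 // Accessed atomically.

	count uint32 // plain field, protected by some lock instead
}

func main() {
	s := new(stats)
	atomic.Store64(&s.goal, 42)
	_ = atomic.Load64(&s.goal)
}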
@@ -1399,9 +1401,10 @@ func (h *mheap) grow(npage uintptr) bool {
 	// By scavenging inline we deal with the failure to allocate out of
 	// memory fragments by scavenging the memory fragments that are least
 	// likely to be re-used.
-	if retained := heapRetained(); retained+uint64(totalGrowth) > h.scavengeGoal {
+	scavengeGoal := atomic.Load64(&h.scavengeGoal)
+	if retained := heapRetained(); retained+uint64(totalGrowth) > scavengeGoal {
 		todo := totalGrowth
-		if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
+		if overage := uintptr(retained + uint64(totalGrowth) - scavengeGoal); todo > overage {
 			todo = overage
 		}
 		h.pages.scavenge(todo, false)
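Loading the goal once into a local, instead of calling atomic.Load64 at each use, guarantees the threshold check and the overage computation see the same snapshot even if another goroutine stores a new goal in between. A self-contained sketch of that load-once pattern (todoFor is a hypothetical helper, not runtime code):

package main

import (
	"fmt"
	"sync/atomic"
)

var scavengeGoal uint64 // accessed atomically

// todoFor returns how much of a growth should be scavenged back: only the
// portion by which retained memory would exceed the goal.
func todoFor(retained, growth uint64) uint64 {
	goal := atomic.Load64(&scavengeGoal) // one snapshot for both uses below
	if retained+growth <= goal {
		return 0
	}
	todo := growth
	if overage := retained + growth - goal; todo > overage {
		todo = overage
	}
	return todo
}

func main() {
	atomic.Store64(&scavengeGoal, 1000)
	fmt.Println(todoFor(900, 300)) // 200: only the overage needs scavenging
}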