mirror of https://github.com/golang/go, synced 2024-11-20 08:14:41 -07:00

commit 61f56e925e
When we grow the heap, we create a temporary "in use" span for the memory acquired from the OS and then free that span to link it into the heap. Hence, we (1) increase pagesInUse when we make the temporary span so that (2) freeing the span will correctly decrease it.

However, currently step (1) increases pagesInUse by the number of pages requested from the heap, while step (2) decreases it by the number of pages requested from the OS (the size of the temporary span). These aren't necessarily the same, since we round up the number of pages we request from the OS, so steps 1 and 2 don't necessarily cancel out like they're supposed to.

Over time, this can add up and cause pagesInUse to underflow and wrap around to 2^64. The garbage collector computes the sweep ratio from this, so if this happens, the sweep ratio becomes effectively infinite, causing the first allocation on each P in a sweep cycle to sweep the entire heap. This makes sweeping effectively STW.

Fix this by increasing pagesInUse in step 1 by the number of pages requested from the OS, so that the two steps correctly cancel out. We add a test that checks that the running total matches the actual state of the heap.

Fixes #15022. For 1.6.x.

Change-Id: Iefd9d6abe37d0d447cbdbdf9941662e4f18eeffc
Reviewed-on: https://go-review.googlesource.com/21280
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
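To make the failure mode concrete, here is a toy model of the mismatch described above. It is not runtime code: the 8 KiB page size, the 64 KiB rounding granularity, and the three-page request are illustrative stand-ins, not values taken from this change.

// Toy model of the pagesInUse accounting mismatch; not runtime code.
package main

import "fmt"

func main() {
	const pageSize = 8 << 10                 // assumed page size (8 KiB)
	const chunkPages = (64 << 10) / pageSize // OS requests rounded up to 64 KiB

	var pagesInUse uint64 // stand-in for mheap_.pagesInUse

	requested := uint64(3) // pages the caller asked the heap for
	granted := ((requested + chunkPages - 1) / chunkPages) * chunkPages

	pagesInUse += requested // step (1), buggy: adds the pages requested from the heap
	pagesInUse -= granted   // step (2): freeing the temporary span subtracts the rounded-up size

	fmt.Println(pagesInUse) // 18446744073709551611: the counter wrapped past zero
}

With the fix, step (1) adds the rounded-up page count actually obtained from the OS, so the two steps cancel and the counter stays at zero.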
215 lines · 4.2 KiB · Go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint
var Sqrt = sqrt

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	lfstackpush(head, (*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer(lfstackpop(head)))
}

func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

var StringHash = stringHash
var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
var EfaceHash = efaceHash
var IfaceHash = ifaceHash
var MemclrBytes = memclrBytes

var HashLoad = &hashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Gostringnocopy = gostringnocopy
var Maxstring = &maxstring

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

var BigEndian = sys.BigEndian

// For benchmarking.

func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = sys.PtrSize

var TestingAssertE2I2GC = &testingAssertE2I2GC
var TestingAssertE2T2GC = &testingAssertE2T2GC

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

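// CountPagesInUse stops the world and returns both the heap's running
// pagesInUse total and a direct count of pages in spans that are
// currently in use, so the accounting test added with this change can
// verify that the two stay in sync.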
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range h_allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}
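For reference, the check the commit message mentions ("a test that checks that the running total matches the actual state of the heap") would drive this hook roughly as sketched below. Such a test has to live in the runtime package's own test files, since export_test.go is only compiled for them; the test name and exact failure message here are assumptions, not quoted from the change.

package runtime_test

import (
	"runtime"
	"testing"
)

// Assumed shape of the accompanying test: the two values returned by
// CountPagesInUse must agree, otherwise the running total has drifted.
func TestCountPagesInUse(t *testing.T) {
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse = %d, but direct count of in-use pages = %d", pagesInUse, counted)
	}
}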