mirror of https://github.com/golang/go
runtime: disable scavenger on 64k page size kernels
Update #9993

If the physical page size of the machine is larger than the logical
heap page size, for example 8k logical, 64k physical, then madvise(2)
will round up the requested amount to a 64k boundary and may discard
pages close to the page being madvised.

This patch disables the scavenger in these situations, which at the
moment is only ppc64 and ppc64le systems. NaCl also uses a 64k page
size, but it's not clear if it is affected by this problem.

Change-Id: Ib897f8d3df5bd915ddc0b510f2fd90a30ef329ca
Reviewed-on: https://go-review.googlesource.com/6091
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
commit f9cc72ccfe
parent 045f9df466
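To make the rounding behaviour described in the commit message concrete, here is a minimal, standalone sketch (not runtime code) of what happens when the freed amount is rounded up to the kernel's page size. The 8k/64k sizes come from the commit message; the sample address and all names below are illustrative assumptions, not values taken from the runtime.

package main

import "fmt"

func main() {
	const logicalPage = 8 << 10 // 8k runtime heap page (assumed, per the commit message)
	const physPage = 64 << 10   // 64k kernel page (assumed, per the commit message)

	// Suppose the scavenger asks the kernel to release one 8k heap page.
	start := uintptr(0x10002000) // illustrative address only
	length := uintptr(logicalPage)

	// Per the commit message, the kernel rounds the requested amount up to a
	// 64k boundary, so the advice covers memory past the span being freed.
	rounded := (length + physPage - 1) &^ uintptr(physPage-1)

	fmt.Printf("requested:    [%#x, %#x) = %d bytes\n", start, start+length, length)
	fmt.Printf("kernel frees: [%#x, %#x) = %d bytes\n", start, start+rounded, rounded)
	// The extra 56k belongs to neighbouring heap pages that may still be live,
	// which is why scavenging is disabled when the physical page is larger.
}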
@@ -88,6 +88,10 @@ func TestReadGCStats(t *testing.T) {
 var big = make([]byte, 1<<20)
 
 func TestFreeOSMemory(t *testing.T) {
+	switch runtime.GOARCH {
+	case "ppc64", "ppc64le", "nacl":
+		t.Skip("issue 9993; scavenger temporarily disabled on systems with 64k pages")
+	}
 	var ms1, ms2 runtime.MemStats
 
 	if big == nil {
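The hunk above gates TestFreeOSMemory on the architectures whose kernels use 64k pages. As a standalone illustration of the same architecture-based skip pattern, here is a minimal sketch; the test name, package, and skip message are illustrative, not part of the commit.

package example_test

import (
	"runtime"
	"testing"
)

// TestNeedsActiveScavenger is a hypothetical test that only makes sense when
// the runtime's scavenger is active, so it is skipped on the architectures
// where this commit disables scavenging.
func TestNeedsActiveScavenger(t *testing.T) {
	switch runtime.GOARCH {
	case "ppc64", "ppc64le":
		t.Skip("issue 9993; scavenger disabled on systems with 64k pages")
	}
	// ... assertions that rely on memory actually being returned to the OS ...
}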
@@ -717,6 +717,15 @@ func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsi
 }
 
 func scavengelist(list *mspan, now, limit uint64) uintptr {
+	if _PhysPageSize > _PageSize {
+		// golang.org/issue/9993
+		// If the physical page size of the machine is larger than
+		// our logical heap page size the kernel may round up the
+		// amount to be freed to its page size and corrupt the heap
+		// pages surrounding the unused block.
+		return 0
+	}
+
 	if mSpanList_IsEmpty(list) {
 		return 0
 	}
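The guard above compares the kernel's physical page size against the runtime's logical heap page size. Outside the runtime, a rough equivalent of that comparison can be sketched with os.Getpagesize; the 8k constant below is an assumption standing in for the unexported _PageSize, not a value read from the runtime.

package main

import (
	"fmt"
	"os"
)

// logicalHeapPage stands in for the runtime's unexported _PageSize (8k at the
// time of this commit); it is an assumption for illustration only.
const logicalHeapPage = 8 << 10

func main() {
	phys := os.Getpagesize()
	if phys > logicalHeapPage {
		// This is the situation the commit guards against: madvise on one
		// heap page could affect neighbouring, still-live heap pages.
		fmt.Printf("physical page %d > heap page %d: scavenging disabled\n", phys, logicalHeapPage)
		return
	}
	fmt.Printf("physical page %d <= heap page %d: scavenging is safe\n", phys, logicalHeapPage)
}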