
runtime: delete hugepage tracking dead code

After the previous CL, this is now all dead code. This change is
separated out to make the previous one easy to backport.

For #63334.
Related to #61718 and #59960.

Change-Id: I109673ed97c62c472bbe2717dfeeb5aa4fc883ea
Reviewed-on: https://go-review.googlesource.com/c/go/+/532117
Reviewed-by: Michael Pratt <mpratt@google.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Michael Anthony Knyszek 2023-10-02 15:42:15 +00:00 committed by Gopher Robot
parent 595deec3dd
commit 765dfb00b2
2 changed files with 5 additions and 53 deletions

src/runtime/export_test.go

@@ -1829,10 +1829,6 @@ func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
 	s.i.setEmpty(chunkIdx(ci))
 }
 
-func (s *ScavengeIndex) SetNoHugePage(ci ChunkIdx) {
-	s.i.setNoHugePage(chunkIdx(ci))
-}
-
 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
 	sc0 := scavChunkData{
 		gen: gen,

src/runtime/mgcscavenge.go

@@ -769,10 +769,6 @@ func (p *pageAlloc) scavengeOne(ci chunkIdx, searchIdx uint, max uintptr) uintptr {
 		p.chunkOf(ci).allocRange(base, npages)
 		p.update(addr, uintptr(npages), true, true)
 
-		// Grab whether the chunk is hugepage backed and if it is,
-		// clear it. We're about to break up this huge page.
-		p.scav.index.setNoHugePage(ci)
-
 		// With that done, it's safe to unlock.
 		unlock(p.mheapLock)
@@ -1141,16 +1137,11 @@ func (s *scavengeIndex) find(force bool) (chunkIdx, uint) {
 func (s *scavengeIndex) alloc(ci chunkIdx, npages uint) {
 	sc := s.chunks[ci].load()
 	sc.alloc(npages, s.gen)
-	if !sc.isHugePage() && sc.inUse > scavChunkHiOccPages {
-		// Mark that we're considering this chunk as backed by huge pages.
-		sc.setHugePage()
-
-		// TODO(mknyszek): Consider eagerly backing memory with huge pages
-		// here. In the past we've attempted to use sysHugePageCollapse
-		// (which uses MADV_COLLAPSE on Linux, and is unsupported elsewhere)
-		// for this purpose, but that caused performance issues in production
-		// environments.
-	}
+	// TODO(mknyszek): Consider eagerly backing memory with huge pages
+	// here and track whether we believe this chunk is backed by huge pages.
+	// In the past we've attempted to use sysHugePageCollapse (which uses
+	// MADV_COLLAPSE on Linux, and is unsupported elsewhere) for this purpose,
+	// but that caused performance issues in production environments.
 	s.chunks[ci].store(sc)
 }
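Note on the deleted branch above: it marked a chunk as huge-page-worthy once its occupancy crossed scavChunkHiOccPages. A minimal sketch of that threshold heuristic in isolation follows; every name in it (markIfDense, hugeWorthy, both parameters) is a hypothetical stand-in, not runtime API:

package main

import "fmt"

// markIfDense flags a chunk as huge-page-worthy once occupancy crosses
// a threshold, mirroring the shape of the deleted comparison of
// sc.inUse against scavChunkHiOccPages.
func markIfDense(flags uint8, occupied, threshold uint16) uint8 {
	const hugeWorthy = 1 << 0 // hypothetical flag bit
	if flags&hugeWorthy == 0 && occupied > threshold {
		flags |= hugeWorthy
	}
	return flags
}

func main() {
	fmt.Println(markIfDense(0, 400, 450)) // 0: below threshold, unmarked
	fmt.Println(markIfDense(0, 500, 450)) // 1: dense enough, marked
}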
@@ -1204,19 +1195,6 @@ func (s *scavengeIndex) setEmpty(ci chunkIdx) {
 	s.chunks[ci].store(val)
 }
 
-// setNoHugePage updates the backed-by-hugepages status of a particular chunk.
-// Returns true if the set was successful (not already backed by huge pages).
-//
-// setNoHugePage may only run concurrently with find.
-func (s *scavengeIndex) setNoHugePage(ci chunkIdx) {
-	val := s.chunks[ci].load()
-	if !val.isHugePage() {
-		return
-	}
-	val.setNoHugePage()
-	s.chunks[ci].store(val)
-}
-
 // atomicScavChunkData is an atomic wrapper around a scavChunkData
 // that stores it in its packed form.
 type atomicScavChunkData struct {
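The deleted setNoHugePage follows a load-modify-store pattern over a value kept in packed atomic form; its doc comment permits it to run concurrently only with find, a pure reader, so the unsynchronized read-modify-write cannot race with another writer. A self-contained sketch of that packed-atomic-wrapper pattern, with all names (chunkData, atomicChunkData, pack, unpack) hypothetical rather than the runtime's:

package main

import (
	"fmt"
	"sync/atomic"
)

// chunkData is a hypothetical stand-in for a struct that packs into
// 8 bytes, like the runtime's scavChunkData.
type chunkData struct {
	gen   uint32
	flags uint8
}

// pack squeezes chunkData into a uint64; unpack reverses it.
func (c chunkData) pack() uint64 { return uint64(c.gen)<<8 | uint64(c.flags) }

func unpack(v uint64) chunkData {
	return chunkData{gen: uint32(v >> 8), flags: uint8(v)}
}

// atomicChunkData stores chunkData in packed form so readers always
// observe a consistent snapshot of every field at once.
type atomicChunkData struct{ v atomic.Uint64 }

func (a *atomicChunkData) load() chunkData   { return unpack(a.v.Load()) }
func (a *atomicChunkData) store(c chunkData) { a.v.Store(c.pack()) }

func main() {
	var a atomicChunkData

	// Load-modify-store: safe with at most one writer at a time, which
	// is why the deleted setNoHugePage could only run alongside readers.
	c := a.load()
	c.flags |= 1
	a.store(c)
	fmt.Printf("%+v\n", a.load()) // {gen:0 flags:1}
}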
@@ -1285,13 +1263,6 @@ const (
 	// file. The reason we say "HasFree" here is so the zero value is
 	// correct for a newly-grown chunk. (New memory is scavenged.)
 	scavChunkHasFree scavChunkFlags = 1 << iota
-	// scavChunkNoHugePage indicates whether this chunk has had any huge
-	// pages broken by the scavenger.
-	//
-	// The negative here is unfortunate, but necessary to make it so that
-	// the zero value of scavChunkData accurately represents the state of
-	// a newly-grown chunk. (New memory is marked as backed by huge pages.)
-	scavChunkNoHugePage
 
 	// scavChunkMaxFlags is the maximum number of flags we can have, given how
 	// a scavChunkData is packed into 8 bytes.
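The comment on the deleted scavChunkNoHugePage flag describes a handy Go idiom: store a flag in its negated sense so that the zero value of the containing struct correctly describes a freshly created object. A minimal sketch of the idiom under hypothetical names (state, stateNotCached):

package main

import "fmt"

// state is a flag set whose zero value must describe a newly created
// object. New objects start out "cached", so we store the negated
// flag: all-zero bits mean cached.
type state uint8

const (
	// stateNotCached is set once an object loses its cached status.
	// Storing the negative keeps the zero value correct, just as
	// scavChunkNoHugePage kept new chunks "backed by huge pages".
	stateNotCached state = 1 << iota
)

// isCached reports whether the notCached flag is clear.
func (s state) isCached() bool { return s&stateNotCached == 0 }

// setNotCached sets the notCached flag.
func (s *state) setNotCached() { *s |= stateNotCached }

func main() {
	var s state               // zero value
	fmt.Println(s.isCached()) // true: the default state costs no bits
	s.setNotCached()
	fmt.Println(s.isCached()) // false
}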
@@ -1324,21 +1295,6 @@ func (sc *scavChunkFlags) setNonEmpty() {
 	*sc |= scavChunkHasFree
 }
 
-// isHugePage returns false if the noHugePage flag is set.
-func (sc *scavChunkFlags) isHugePage() bool {
-	return (*sc)&scavChunkNoHugePage == 0
-}
-
-// setHugePage clears the noHugePage flag.
-func (sc *scavChunkFlags) setHugePage() {
-	*sc &^= scavChunkNoHugePage
-}
-
-// setNoHugePage sets the noHugePage flag.
-func (sc *scavChunkFlags) setNoHugePage() {
-	*sc |= scavChunkNoHugePage
-}
-
 // shouldScavenge returns true if the corresponding chunk should be interrogated
 // by the scavenger.
 func (sc scavChunkData) shouldScavenge(currGen uint32, force bool) bool {
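The deleted accessors manipulate the flag with |= to set it and Go's AND NOT operator &^= to clear it. A tiny runnable sketch of those two operations on an arbitrary flag bit:

package main

import "fmt"

func main() {
	const flag uint8 = 1 << 1 // the bit we toggle; the value is arbitrary

	var bits uint8 = 0b0000_0100
	bits |= flag               // set the flag, as setNoHugePage did
	fmt.Printf("%08b\n", bits) // 00000110
	bits &^= flag              // clear it with AND NOT, as setHugePage did
	fmt.Printf("%08b\n", bits) // 00000100
}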