mirror of
https://github.com/golang/go
synced 2024-11-20 02:14:46 -07:00
e4ac2d4acc
This is a renaming of the field ref to the more appropriate allocCount. The field holds the number of objects in the span that are currently allocated. Some throws strings were adjusted to more accurately convey the meaning of allocCount. Change-Id: I10daf44e3e9cc24a10912638c7de3c1984ef8efe Reviewed-on: https://go-review.googlesource.com/19518 Reviewed-by: Austin Clements <austin@google.com>
147 lines
4.1 KiB
Go
147 lines
4.1 KiB
Go
// Copyright 2009 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
package runtime
|
|
|
|
import "unsafe"
|
|
|
|
// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample int32   // trigger heap sample after allocating this many bytes
	local_scan  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	tiny             uintptr
	tinyoffset       uintptr // offset of the next free byte in the tiny block (reset in releaseAll)
	local_tinyallocs uintptr // number of tiny allocs not counted in other stats

	// The rest is not accessed on every malloc.
	alloc [_NumSizeClasses]*mspan // spans to allocate from

	stackcache [_NumStackOrders]stackfreelist // per-order cache of free stack spans

	// Local allocator stats, flushed during GC.
	local_nlookup    uintptr                  // number of pointer lookups
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
}
|
|
|
|
// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr // next node in the list, hidden from the GC
}
|
|
|
|
// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
// Because it is a uintptr, the GC neither traces through it nor
// emits write barriers when it is stored.
type gclinkptr uintptr
|
|
|
|
// ptr returns the *gclink form of p.
|
|
// The result should be used for accessing fields, not stored
|
|
// in other data structures.
|
|
func (p gclinkptr) ptr() *gclink {
|
|
return (*gclink)(unsafe.Pointer(p))
|
|
}
|
|
|
|
// A stackfreelist holds one order's worth of cached free stack
// spans for an mcache (see the stackcache field of mcache).
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}
|
|
|
|
// dummy MSpan that contains no free objects.
// It is the placeholder stored in mcache.alloc slots that have no
// real cached span (see allocmcache, refill, releaseAll).
var emptymspan mspan
|
|
|
|
func allocmcache() *mcache {
|
|
lock(&mheap_.lock)
|
|
c := (*mcache)(mheap_.cachealloc.alloc())
|
|
unlock(&mheap_.lock)
|
|
memclr(unsafe.Pointer(c), unsafe.Sizeof(*c))
|
|
for i := 0; i < _NumSizeClasses; i++ {
|
|
c.alloc[i] = &emptymspan
|
|
}
|
|
c.next_sample = nextSample()
|
|
return c
|
|
}
|
|
|
|
// freemcache returns c's cached spans and stacks to the heap,
// flushes c's local stats into the global stats, and frees c's
// memory back to mheap_.cachealloc. Runs on the system stack.
func freemcache(c *mcache) {
	systemstack(func() {
		// Hand every cached span back to the central lists and
		// drop the cached stack spans before freeing c itself.
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		// Stats must be flushed before the backing memory is
		// recycled by cachealloc.free.
		purgecachedstats(c)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}
|
|
|
|
// Gets a span that has a free object in it and assigns it
|
|
// to be the cached span for the given sizeclass. Returns this span.
|
|
func (c *mcache) refill(sizeclass int32) *mspan {
|
|
_g_ := getg()
|
|
|
|
_g_.m.locks++
|
|
// Return the current cached span to the central lists.
|
|
s := c.alloc[sizeclass]
|
|
|
|
if uintptr(s.allocCount) != s.nelems {
|
|
throw("refill of span with free space remaining")
|
|
}
|
|
|
|
if s != &emptymspan {
|
|
s.incache = false
|
|
}
|
|
|
|
// Get a new cached span from the central lists.
|
|
s = mheap_.central[sizeclass].mcentral.cacheSpan()
|
|
if s == nil {
|
|
throw("out of memory")
|
|
}
|
|
|
|
if uintptr(s.allocCount) == s.nelems {
|
|
throw("span has no free space")
|
|
}
|
|
|
|
c.alloc[sizeclass] = s
|
|
_g_.m.locks--
|
|
return s
|
|
}
|
|
|
|
func (c *mcache) releaseAll() {
|
|
for i := 0; i < _NumSizeClasses; i++ {
|
|
s := c.alloc[i]
|
|
if s != &emptymspan {
|
|
mheap_.central[i].mcentral.uncacheSpan(s)
|
|
c.alloc[i] = &emptymspan
|
|
}
|
|
}
|
|
// Clear tinyalloc pool.
|
|
c.tiny = 0
|
|
c.tinyoffset = 0
|
|
}
|