2014-11-11 15:05:02 -07:00
|
|
|
// Copyright 2009 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
2016-03-01 16:21:55 -07:00
|
|
|
// Fixed-size object allocator. Returned memory is not zeroed.
|
2014-11-11 15:05:02 -07:00
|
|
|
//
|
2015-03-11 13:58:47 -06:00
|
|
|
// See malloc.go for overview.
|
2014-11-11 15:05:02 -07:00
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
|
|
|
import "unsafe"
|
|
|
|
|
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around sysAlloc to manage its
// MCache and MSpan objects.
//
// Memory returned by fixalloc.alloc is zeroed by default, but the
// caller may take responsibility for zeroing allocations by setting
// the zero flag to false. This is only safe if the memory never
// contains heap pointers.
//
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
//
// Consider marking fixalloc'd types go:notinheap.
type fixalloc struct {
	size   uintptr                     // size of each object handed out by alloc
	first  func(arg, p unsafe.Pointer) // called first time p is returned
	arg    unsafe.Pointer              // first argument passed to first
	list   *mlink                      // free list of previously freed objects
	chunk  uintptr                     // use uintptr instead of unsafe.Pointer to avoid write barriers
	nchunk uint32                      // bytes remaining in the current chunk
	inuse  uintptr                     // in-use bytes now
	stat   *uint64                     // memory statistic that allocations are accounted to
	zero   bool                        // zero allocations
}
|
|
|
|
|
|
|
|
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
// Since assignments to mlink.next will result in a write barrier being performed
// this cannot be used by some of the internal GC structures. For example when
// the sweeper is placing an unmarked object on the free list it does not want the
// write barrier to be called since that could result in the object being reachable.
//
//go:notinheap
type mlink struct {
	next *mlink // next free block; overlays the first word of the freed object
}
|
|
|
|
|
2014-11-11 15:05:02 -07:00
|
|
|
// Initialize f to allocate objects of the given size,
|
|
|
|
// using the allocator to obtain chunks of memory.
|
2015-11-11 17:13:51 -07:00
|
|
|
func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *uint64) {
|
2014-11-11 15:05:02 -07:00
|
|
|
f.size = size
|
2015-10-16 17:21:26 -06:00
|
|
|
f.first = first
|
2014-11-11 15:05:02 -07:00
|
|
|
f.arg = arg
|
|
|
|
f.list = nil
|
2017-03-27 12:20:35 -06:00
|
|
|
f.chunk = 0
|
2014-11-11 15:05:02 -07:00
|
|
|
f.nchunk = 0
|
|
|
|
f.inuse = 0
|
|
|
|
f.stat = stat
|
runtime: make fixalloc zero allocations on reuse
Currently fixalloc does not zero memory it reuses. This is dangerous
with the hybrid barrier if the type may contain heap pointers, since
it may cause us to observe a dead heap pointer on reuse. It's also
error-prone since it's the only allocator that doesn't zero on
allocation (mallocgc of course zeroes, but so do persistentalloc and
sysAlloc). It's also largely pointless: for mcache, the caller
immediately memclrs the allocation; and the two specials types are
tiny so there's no real cost to zeroing them.
Change fixalloc to zero allocations by default.
The only type we don't zero by default is mspan. This actually
requires that the spsn's sweepgen survive across freeing and
reallocating a span. If we were to zero it, the following race would
be possible:
1. The current sweepgen is 2. Span s is on the unswept list.
2. Direct sweeping sweeps span s, finds it's all free, and releases s
to the fixalloc.
3. Thread 1 allocates s from fixalloc. Suppose this zeros s, including
s.sweepgen.
4. Thread 1 calls s.init, which sets s.state to _MSpanDead.
5. On thread 2, background sweeping comes across span s in allspans
and cas's s.sweepgen from 0 (sg-2) to 1 (sg-1). Now it thinks it
owns it for sweeping. 6. Thread 1 continues initializing s.
Everything breaks.
I would like to fix this because it's obviously confusing, but it's a
subtle enough problem that I'm leaving it alone for now. The solution
may be to skip sweepgen 0, but then we have to think about wrap-around
much more carefully.
Updates #17503.
Change-Id: Ie08691feed3abbb06a31381b94beb0a2e36a0613
Reviewed-on: https://go-review.googlesource.com/31368
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
2016-09-25 15:12:43 -06:00
|
|
|
f.zero = true
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
// alloc returns one object of f.size bytes: from the free list when
// possible, otherwise carved out of the current chunk, refilling the
// chunk from persistentalloc when it cannot satisfy the request.
func (f *fixalloc) alloc() unsafe.Pointer {
	if f.size == 0 {
		print("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n")
		throw("runtime: internal error")
	}

	if f.list != nil {
		// Fast path: pop the most recently freed object.
		v := unsafe.Pointer(f.list)
		f.list = f.list.next
		f.inuse += f.size
		if f.zero {
			// Reused memory holds stale data (at minimum the mlink
			// pointer just followed, which overlays the first word);
			// clear it unless the caller opted out via f.zero.
			memclrNoHeapPointers(v, f.size)
		}
		return v
	}
	if uintptr(f.nchunk) < f.size {
		// Current chunk can't fit another object; grab a fresh one.
		f.chunk = uintptr(persistentalloc(_FixAllocChunk, 0, f.stat))
		f.nchunk = _FixAllocChunk
	}

	v := unsafe.Pointer(f.chunk)
	if f.first != nil {
		// One-time initialization hook, run before the object is
		// handed out for the first time.
		f.first(f.arg, v)
	}
	// chunk is a uintptr (not unsafe.Pointer) so this bump does not
	// produce a write barrier.
	f.chunk = f.chunk + f.size
	f.nchunk -= uint32(f.size)
	f.inuse += f.size
	return v
}
|
|
|
|
|
2015-11-11 17:13:51 -07:00
|
|
|
func (f *fixalloc) free(p unsafe.Pointer) {
|
2014-11-11 15:05:02 -07:00
|
|
|
f.inuse -= f.size
|
|
|
|
v := (*mlink)(p)
|
|
|
|
v.next = f.list
|
|
|
|
f.list = v
|
|
|
|
}
|