// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO(rsc): The code having to do with the heap bitmap needs very serious cleanup.
// It has gotten completely out of control.

// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
// non-generational and non-compacting. Allocation is done using size segregated per P allocation
// areas to minimize fragmentation while eliminating locks in the common case.
//
// The algorithm decomposes into several steps.
// This is a high level description of the algorithm being used. For an overview of GC a good
// place to start is Richard Jones' gchandbook.org.
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
// 966-975.
// For journal quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
// 0. Set phase = GCscan from GCoff.
// 1. Wait for all P's to acknowledge phase change.
//     At this point all goroutines have passed through a GC safepoint and
//     know we are in the GCscan phase.
// 2. GC scans all goroutine stacks, marks and enqueues all encountered pointers
//     (marking avoids most duplicate enqueuing but races may produce benign duplication).
//     Preempted goroutines are scanned before P schedules next goroutine.
// 3. Set phase = GCmark.
// 4. Wait for all P's to acknowledge phase change.
// 5. Now write barrier marks and enqueues black, grey, or white to white pointers.
//     Malloc still allocates white (non-marked) objects.
// 6. Meanwhile GC transitively walks the heap marking reachable objects.
// 7. When GC finishes marking heap, it preempts P's one-by-one and
//     retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
//     currently scheduled on the P).
// 8. Once the GC has exhausted all available marking work it sets phase = marktermination.
// 9. Wait for all P's to acknowledge phase change.
// 10. Malloc now allocates black objects, so number of unmarked reachable objects
//     monotonically decreases.
// 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet
//     reachable objects.
// 12. When GC completes a full cycle over P's and discovers no new grey
//     objects (which means all reachable objects are marked), set phase = GCsweep.
// 13. Wait for all P's to acknowledge phase change.
// 14. Now malloc allocates white (but sweeps spans before use).
//     Write barrier becomes nop.
// 15. GC does background sweeping, see description below.
// 16. When sweeping is complete set phase to GCoff.
// 17. When sufficient allocation has taken place replay the sequence starting at 0 above,
//     see discussion of GC rate below.
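//
// As an illustration of step 5 only (a sketch, not the runtime's actual
// write barrier; shade here simply stands for greying a white object):
//
//	// writePointerSketch performs "*slot = ptr" with an insertion barrier.
//	func writePointerSketch(slot *unsafe.Pointer, ptr unsafe.Pointer) {
//		if writeBarrierEnabled && ptr != nil {
//			shade(uintptr(ptr)) // grey the pointee if it is still white
//		}
//		*slot = ptr
//	}
//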
// Changing phases.
// Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
// All phase action must be benign in the presence of a change.
// Starting with GCoff
// GCoff to GCscan
//     GCscan scans stacks and globals greying them and never marks an object black.
//     Once all the P's are aware of the new phase they will scan gs on preemption.
//     This means that the scanning of preempted gs can't start until all the Ps
//     have acknowledged.
// GCscan to GCmark
//     GCmark turns on the write barrier which also only greys objects. No scanning
//     of objects (making them black) can happen until all the Ps have acknowledged
//     the phase change.
// GCmark to GCmarktermination
//     The only change here is that we start allocating black so the Ps must acknowledge
//     the change before we begin the termination algorithm.
// GCmarktermination to GCsweep
//     Objects currently on the freelist must be marked black for this to work.
//     Are things on the free lists black or white? How does the sweep phase work?
// Concurrent sweep.
//
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// At the end of STW mark termination all spans are marked as "needs sweeping".
//
// The background sweeper goroutine simply sweeps spans one-by-one.
//
// To avoid requesting more OS memory while there are unswept spans, when a
// goroutine needs another span, it first attempts to reclaim that much memory
// by sweeping. When a goroutine needs to allocate a new small-object span, it
// sweeps small-object spans for the same object size until it frees at least
// one object. When a goroutine needs to allocate a large-object span from the heap,
// it sweeps spans until it frees at least that many pages into the heap. There is
// one case where this may not suffice: if a goroutine sweeps and frees two
// nonadjacent one-page spans to the heap, it will allocate a new two-page
// span, but there can still be other one-page unswept spans which could be
// combined into a two-page span.
//
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in the GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).
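//
// The page-reclamation policy above is, in rough sketch form (assuming a
// sweepone-style helper that sweeps one span and reports how many pages it
// returned to the heap, or ^uintptr(0) when nothing is left to sweep):
//
//	// reclaimSketch sweeps until at least npages pages have been freed
//	// back to the heap, or until there are no unswept spans left.
//	func reclaimSketch(npages uintptr) {
//		var freed uintptr
//		for freed < npages {
//			n := sweepone()
//			if n == ^uintptr(0) {
//				break // no unswept spans remain
//			}
//			freed += n
//		}
//	}
//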
// GC rate.
// Next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by the GOGC environment variable
// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
// (this mark is tracked in the next_gc variable). This keeps the GC cost in linear
// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
// (and also the amount of extra memory used).
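//
// As a worked example of the arithmetic above (a sketch of the policy; the
// actual pacing lives in gcController below):
//
//	heapMarked := uint64(4 << 20)                           // 4 MB in use after the last GC
//	nextGC := heapMarked + heapMarked*uint64(gcpercent)/100 // 8 MB when GOGC=100
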
package runtime

import "unsafe"

const (
	_DebugGC         = 0
	_ConcurrentSweep = true
	_FinBlockSize    = 4 * 1024
	_RootData        = 0
	_RootBss         = 1
	_RootFinalizers  = 2
	_RootSpans       = 3
	_RootFlushCaches = 4
	_RootCount       = 5
)

// heapminimum is the minimum number of bytes in the heap.
// This cleans up the corner case where we have a very small live set but a lot
// of allocations and collecting every GOGC * live set is expensive.
// heapminimum is adjusted by multiplying it by GOGC/100. In
// the special case of GOGC==0 this will set heapminimum to 0, resulting
// in collecting at every allocation even when the heap size is small.
var heapminimum = uint64(4 << 20)

// Initialized from $GOGC. GOGC=off means no GC.
var gcpercent int32

func gcinit() {
	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
		throw("size of Workbuf is suboptimal")
	}

	work.markfor = parforalloc(_MaxGcproc)
	_ = setGCPercent(readgogc())
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		datap.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(datap.gcdata)), datap.edata-datap.data)
		datap.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(datap.gcbss)), datap.ebss-datap.bss)
	}
	memstats.next_gc = heapminimum
}

func readgogc() int32 {
	p := gogetenv("GOGC")
	if p == "" {
		return 100
	}
	if p == "off" {
		return -1
	}
	return int32(atoi(p))
}

// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine and enables GC.
func gcenable() {
	c := make(chan int, 1)
	go bgsweep(c)
	<-c
	memstats.enablegc = true // now that runtime is initialized, GC is okay
}

func setGCPercent(in int32) (out int32) {
	lock(&mheap_.lock)
	out = gcpercent
	if in < 0 {
		in = -1
	}
	gcpercent = in
	heapminimum = heapminimum * uint64(gcpercent) / 100
	unlock(&mheap_.lock)
	return out
}

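// From user code, GOGC is normally adjusted through runtime/debug.SetGCPercent,
// which is wired (via the runtime) to setGCPercent above. A minimal usage
// sketch (an illustration of the public API, not part of the runtime):
//
//	old := debug.SetGCPercent(50) // trigger GC after 50% heap growth
//	defer debug.SetGCPercent(old) // restore the previous setting
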
// Garbage collector phase.
// Indicates to write barrier and synchronization task to perform.
var gcphase uint32
var writeBarrierEnabled bool // compiler emits references to this in write barriers

// gcBlackenEnabled is 1 if mutator assists and background mark
// workers are allowed to blacken objects. This must only be set when
// gcphase == _GCmark.
var gcBlackenEnabled uint32

const (
	_GCoff             = iota // GC not running, write barrier disabled
	_GCquiesce                // unused state
	_GCstw                    // unused state
	_GCscan                   // GC collecting roots into workbufs, write barrier disabled
	_GCmark                   // GC marking from workbufs, write barrier ENABLED
	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
	_GCsweep                  // GC mark completed; sweeping in background, write barrier disabled
)

//go:nosplit
func setGCPhase(x uint32) {
	atomicstore(&gcphase, x)
	writeBarrierEnabled = gcphase == _GCmark || gcphase == _GCmarktermination
}

// gcMarkWorkerMode represents the mode that a concurrent mark worker
// should operate in.
//
// Concurrent marking happens through four different mechanisms. One
// is mutator assists, which happen in response to allocations and are
// not scheduled. The other three are variations in the per-P mark
// workers and are distinguished by gcMarkWorkerMode.
type gcMarkWorkerMode int

const (
	// gcMarkWorkerDedicatedMode indicates that the P of a mark
	// worker is dedicated to running that mark worker. The mark
	// worker should run without preemption until concurrent mark
	// is done.
	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota

	// gcMarkWorkerFractionalMode indicates that a P is currently
	// running the "fractional" mark worker. The fractional worker
	// is necessary when GOMAXPROCS*gcGoalUtilization is not an
	// integer. The fractional worker should run until it is
	// preempted and will be scheduled to pick up the fractional
	// part of GOMAXPROCS*gcGoalUtilization.
	gcMarkWorkerFractionalMode

	// gcMarkWorkerIdleMode indicates that a P is running the mark
	// worker because it has nothing else to do. The idle worker
	// should run until it is preempted and account its time
	// against gcController.idleMarkTime.
	gcMarkWorkerIdleMode
)

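// As a rough sketch of the priority among these modes (the real selection is
// done by the scheduler using the counters in gcController; the helper name
// below is assumed for illustration only):
//
//	func chooseWorkerModeSketch(c *gcControllerState, pIsIdle bool) (gcMarkWorkerMode, bool) {
//		switch {
//		case c.dedicatedMarkWorkersNeeded > 0:
//			return gcMarkWorkerDedicatedMode, true
//		case c.fractionalMarkWorkersNeeded > 0:
//			return gcMarkWorkerFractionalMode, true
//		case pIsIdle:
//			return gcMarkWorkerIdleMode, true
//		}
//		return 0, false
//	}
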
// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It uses a feedback control algorithm to adjust the memstats.next_gc
// trigger based on the heap growth and GC CPU utilization each cycle.
// This algorithm optimizes for heap growth to match GOGC and for CPU
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at http://golang.org/s/go15gcpacing.
var gcController = gcControllerState{
	// Initial trigger ratio guess.
	triggerRatio: 7 / 8.0,
}

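// In rough terms (a sketch of the relationships described above, not the
// exact pacing code), each cycle the controller aims for
//
//	heapGoal := memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
//
// and sets the trigger below that goal so that concurrent marking can finish
// before the goal is reached.
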
type gcControllerState struct {
	// scanWork is the total scan work performed this cycle. This
	// is updated atomically during the cycle. Updates may be
	// batched arbitrarily, since the value is only read at the
	// end of the cycle.
	//
	// Currently this is the bytes of heap scanned. For most uses,
	// this is an opaque unit of work, but for estimation the
	// definition is important.
	scanWork int64

	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically. Updates
	// occur in bounded batches, since it is both written and read
	// throughout the cycle.
	assistTime int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64

	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// bgMarkStartTime is the absolute start time in nanoseconds
	// that the background mark phase started.
	bgMarkStartTime int64

	// heapGoal is the goal memstats.heap_live for when this cycle
	// ends. This is computed at the beginning of each cycle.
	heapGoal uint64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64

	// assistRatio is the ratio of allocated bytes to scan work
	// that should be performed by mutator assists. This is
	// computed at the beginning of each cycle.
	assistRatio float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker.
	// For example, if the overall mark utilization goal is 25%
	// and GOMAXPROCS is 6, one P will be a dedicated mark worker
	// and this will be set to 0.5 so that 50% of the time some P
	// is in a fractional mark worker. This is computed at the
	// beginning of each cycle.
	fractionalUtilizationGoal float64

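	// The example above follows from splitting the overall utilization target
	// into whole dedicated workers plus a fractional remainder. A sketch of
	// that arithmetic (assuming an overall goal of 0.25):
	//
	//	totalGoal := 0.25 * float64(gomaxprocs)          // 0.25 * 6 = 1.5
	//	dedicated := int64(totalGoal)                    // 1 dedicated worker
	//	fractionalGoal := totalGoal - float64(dedicated) // 0.5 of some P's time
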
	// triggerRatio is the heap growth ratio at which the garbage
	// collection cycle should start. E.g., if this is 0.6, then
	// GC should start when the live heap has reached 1.6 times
	// the heap size marked by the previous cycle. This is updated
	// at the end of each cycle.
	triggerRatio float64

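	// For instance (a sketch of the statement above, not the pacer's exact
	// code; heapMarked stands for the heap size marked by the previous cycle):
	//
	//	// With triggerRatio == 0.6 and 100 MB marked by the last cycle,
	//	// the next cycle is triggered at about 1.6 * 100 MB = 160 MB.
	//	trigger := uint64(float64(heapMarked) * (1 + triggerRatio))
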
	// reviseTimer is a timer that triggers periodic revision of
	// control variables during the cycle.
	reviseTimer timer


	_ [_CacheLineSize]byte

	// fractionalMarkWorkersNeeded is the number of fractional
	// mark workers that need to be started. This is either 0 or
	// 1. This is potentially updated atomically at every
	// scheduling point (hence it gets its own cache line).
	fractionalMarkWorkersNeeded int64

	_ [_CacheLineSize]byte
}
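
// The dedicated/fractional worker split recorded in dedicatedMarkWorkersNeeded
// and fractionalUtilizationGoal can be illustrated with a small self-contained
// sketch (an editor's illustration, not runtime code; the helper name and the
// 0.25 goal constant are assumptions standing in for gcGoalUtilization):
//
//	func exampleWorkerSplit(procs int) (dedicated int64, fractional float64) {
//		const goalUtilization = 0.25            // assumed overall mark CPU goal
//		total := float64(procs) * goalUtilization
//		dedicated = int64(total)                // whole Ps that run dedicated workers
//		fractional = total - float64(dedicated) // remainder covered part-time by one P
//		return dedicated, fractional
//	}
//
// For procs = 6 this yields dedicated = 1 and fractional = 0.5, matching the
// example in the fractionalUtilizationGoal comment above; startCycle below
// performs the same computation using gomaxprocs and gcGoalUtilization.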

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema.
func (c *gcControllerState) startCycle() {
	c.scanWork = 0
	c.bgScanCredit = 0
	c.assistTime = 0
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0

	// If this is the first GC cycle or we're operating on a very
	// small heap, fake heap_marked so it looks like next_gc is
	// the appropriate growth from heap_marked, even though the
	// real heap_marked may not have a meaningful value (on the
	// first cycle) or may be much smaller (resulting in a large
	// error response).
	if memstats.next_gc <= heapminimum {
		memstats.heap_marked = uint64(float64(memstats.next_gc) / (1 + c.triggerRatio))
		memstats.heap_reachable = memstats.heap_marked
	}

	// Compute the heap goal for this cycle
	c.heapGoal = memstats.heap_reachable + memstats.heap_reachable*uint64(gcpercent)/100
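
	// As a concrete illustration of the arithmetic above (an editor's
	// worked sketch with assumed numbers, not runtime code; on the first
	// cycle heap_reachable equals heap_marked, so one value stands in for
	// both): with GOGC=100 and a 4 MB marked/reachable heap,
	//
	//	marked := uint64(4 << 20)                      // assumed previous marked heap
	//	trigger := uint64(float64(marked) * (1 + 0.6)) // triggerRatio=0.6: next GC starts near 6.4 MB
	//	goal := marked + marked*100/100                // gcpercent=100: finish before heap_live hits 8 MB
	//	_, _ = trigger, goal
	//
	// The gap between the trigger and the goal is the allocation budget
	// the cycle has to finish marking in; revise below turns it into the
	// mutator assist ratio.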

	// Compute the total mark utilization goal and divide it among
	// dedicated and fractional workers.
	totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal)
	c.fractionalUtilizationGoal = totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)
	if c.fractionalUtilizationGoal > 0 {
		c.fractionalMarkWorkersNeeded = 1
	} else {
		c.fractionalMarkWorkersNeeded = 0
	}

	// Clear per-P state
	for _, p := range &allp {
		if p == nil {
			break
		}
		p.gcAssistTime = 0
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	// Set up a timer to revise periodically
	c.reviseTimer.f = func(interface{}, uintptr) {
		gcController.revise()
	}
	c.reviseTimer.period = 10 * 1000 * 1000 // 10ms in nanoseconds
	c.reviseTimer.when = nanotime() + c.reviseTimer.period
	addtimer(&c.reviseTimer)
}
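
// The 10ms revise timer above uses the runtime's internal timer machinery.
// In ordinary Go code the same periodic-revision pattern would look roughly
// like the sketch below (an editor's illustration only; time.Ticker cannot be
// used inside the runtime, and the function and parameter names are assumed):
//
//	func runPeriodicRevise(revise func(), stop <-chan struct{}) {
//		t := time.NewTicker(10 * time.Millisecond)
//		defer t.Stop()
//		for {
//			select {
//			case <-t.C:
//				revise() // recompute pacing controls from fresh statistics
//			case <-stop:
//				return
//			}
//		}
//	}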

// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called periodically during
// concurrent mark.
func (c *gcControllerState) revise() {
	// Compute the expected scan work. This is a strict upper
	// bound on the possible scan work in the current heap.
	//
	// You might consider dividing this by 2 (or by
	// (100+GOGC)/100) to counter this over-estimation, but
	// benchmarks show that this has almost no effect on mean
	// mutator utilization, heap size, or assist time and it
	// introduces the danger of under-estimating and letting the
	// mutator outpace the garbage collector.
	scanWorkExpected := memstats.heap_scan

	// Compute the mutator assist ratio so that by the time the
	// mutator allocates the remaining heap bytes up to next_gc,
	// it will have done (or stolen) the estimated amount of scan
	// work.
	heapDistance := int64(c.heapGoal) - int64(work.initialHeapLive)
	if heapDistance <= 1024*1024 {
		// heapDistance can be negative if GC start is delayed
		// or if the allocation that pushed heap_live over
		// next_gc is large or if the trigger is really close
		// to GOGC. We don't want to set the assist negative
		// (or divide by zero, or set it really high), so
		// enforce a minimum on the distance.
		heapDistance = 1024 * 1024
	}
	c.assistRatio = float64(scanWorkExpected) / float64(heapDistance)
}
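
// To see how assistRatio is meant to be used (an editor's sketch, not the
// actual assist code; the function name and parameters are assumptions): a
// mutator that allocates during the mark phase owes scan work in proportion
// to what it allocated,
//
//	func exampleAssistDebt(assistRatio float64, bytesAllocated int64) int64 {
//		// Scan work the allocating goroutine must perform (or steal
//		// from background scan credit) before it may continue.
//		return int64(assistRatio * float64(bytesAllocated))
//	}
//
// Because scanWorkExpected above is a deliberate over-estimate, the resulting
// ratio errs toward more assists rather than letting allocation outpace
// marking.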

// endCycle updates the GC controller state at the end of the
// concurrent part of the GC cycle.
func (c *gcControllerState) endCycle() {
	h_t := c.triggerRatio // For debugging

	// Proportional response gain for the trigger controller. Must
	// be in [0, 1]. Lower values smooth out transient effects but
	// take longer to respond to phase changes. Higher values
	// react to phase changes quickly, but are more affected by
	// transient changes. Values near 1 may be unstable.
	const triggerGain = 0.5

	// Stop the revise timer
	deltimer(&c.reviseTimer)

	// Compute next cycle trigger ratio. First, this computes the
	// "error" for this cycle; that is, how far off the trigger
	// was from what it should have been, accounting for both heap
	// growth and GC CPU utilization. We compute the actual heap
	// growth during this cycle and scale that by how far off from
	// the goal CPU utilization we were (to estimate the heap
	// growth if we had the desired CPU utilization). The
	// difference between this estimate and the GOGC-based goal
	// heap growth is the error.
	goalGrowthRatio := float64(gcpercent) / 100
	actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
	duration := nanotime() - c.bgMarkStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcGoalUtilization
	// Add assist utilization; avoid divide by zero.
	if duration > 0 {
		utilization += float64(c.assistTime) / float64(duration*int64(gomaxprocs))
	}

	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)

	// Finally, we adjust the trigger for next time by this error,
	// damped by the proportional gain.
	c.triggerRatio += triggerGain * triggerError
	if c.triggerRatio < 0 {
		// This can happen if the mutator is allocating very
		// quickly or the GC is scanning very slowly.
		c.triggerRatio = 0
	} else if c.triggerRatio > goalGrowthRatio*0.95 {
		// Ensure there's always a little margin so that the
		// mutator assist ratio isn't infinity.
		c.triggerRatio = goalGrowthRatio * 0.95
	}
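
	// Worked example of the controller step above (an editor's
	// illustration with assumed numbers, not runtime code): suppose
	// GOGC=100 (goalGrowthRatio=1.0), the trigger ratio was 0.6, the heap
	// actually grew by 0.9x, and utilization matched the goal:
	//
	//	goalGrowth, ht, ha := 1.0, 0.6, 0.9
	//	uRatio := 1.0                               // utilization / gcGoalUtilization
	//	errT := goalGrowth - ht - uRatio*(ha-ht)    // 1.0 - 0.6 - 0.3 = 0.1
	//	ht += 0.5 * errT                            // damped by triggerGain: new ratio 0.65
	//	_ = ht
	//
	// A positive error (the heap grew less, at goal utilization, than GOGC
	// allows) nudges the trigger later; a negative error nudges it earlier.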

	if debug.gcpacertrace > 0 {
		// Print controller state in terms of the design
		// document.
		H_m_prev := memstats.heap_marked
		H_T := memstats.next_gc
		h_a := actualGrowthRatio
		H_a := memstats.heap_live
		h_g := goalGrowthRatio
		H_g := int64(float64(H_m_prev) * (1 + h_g))
		u_a := utilization
		u_g := gcGoalUtilization
		W_a := c.scanWork
		print("pacer: H_m_prev=", H_m_prev,
			" h_t=", h_t, " H_T=", H_T,
			" h_a=", h_a, " H_a=", H_a,
			" h_g=", h_g, " H_g=", H_g,
			" u_a=", u_a, " u_g=", u_g,
" W_a=", W_a,
|
2015-04-06 12:30:03 -06:00
|
|
|
" goalΔ=", goalGrowthRatio-h_t,
|
|
|
|
" actualΔ=", h_a-h_t,
|
|
|
|
" u_a/u_g=", u_a/u_g,
|
|
|
|
"\n")
|
|
|
|
}
|
}

// findRunnableGCWorker returns the background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}
	if _p_.gcBgMarkWorker == nil {
		throw("gcControllerState.findRunnable: no background mark worker")
	}
	if work.bgMarkDone != 0 {
		// Background mark is done. Don't schedule background
		// mark worker any more. (This is not just an
		// optimization. Without this we can spin scheduling
		// the background worker and having it return
		// immediately with no work to do.)
		return nil
	}

	decIfPositive := func(ptr *int64) bool {
		if *ptr > 0 {
			if xaddint64(ptr, -1) >= 0 {
				return true
			}
			// We lost a race
			xaddint64(ptr, +1)
		}
		return false
	}
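
	// decIfPositive is a lock-free "claim one worker slot" helper: it only
	// takes a slot while the counter is positive and undoes the decrement
	// if it raced below zero. Outside the runtime the same pattern could
	// be written with sync/atomic (an editor's sketch for illustration;
	// the runtime itself must use xaddint64 as above):
	//
	//	func claimSlot(ctr *int64) bool {
	//		if atomic.LoadInt64(ctr) > 0 {
	//			if atomic.AddInt64(ctr, -1) >= 0 {
	//				return true // we own one slot
	//			}
	//			atomic.AddInt64(ctr, 1) // lost the race; give the slot back
	//		}
	//		return false
	//	}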

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
		// TODO(austin): This P isn't going to run anything
		// else for a while, so kick everything out of its run
		// queue.
	} else {
		if _p_.gcw.wbuf == 0 && work.full == 0 && work.partial == 0 {
			// No work to be done right now. This can
			// happen at the end of the mark phase when
			// there are still assists tapering off. Don't
			// bother running background mark because
			// it'll just return immediately.
			return nil
		}
		if !decIfPositive(&c.fractionalMarkWorkersNeeded) {
			// No more workers are needed right now.
			return nil
		}
BenchmarkTimeParse-2 630ns × (1.00,1.01) 630ns × (1.00,1.00) ~ (p=0.634)
BenchmarkTimeFormat-2 705ns × (0.99,1.01) 710ns × (0.98,1.02) ~ (p=0.174)
GOMAXPROCS=4
BenchmarkBinaryTree17-4 9.87s × (0.96,1.04) 9.75s × (0.96,1.03) ~ (p=0.178)
BenchmarkFannkuch11-4 4.35s × (1.00,1.01) 4.40s × (0.99,1.04) ~ (p=0.071)
BenchmarkFmtFprintfEmpty-4 85.8ns × (0.98,1.06) 85.6ns × (0.98,1.04) ~ (p=0.858)
BenchmarkFmtFprintfString-4 306ns × (0.99,1.03) 304ns × (0.97,1.02) ~ (p=0.470)
BenchmarkFmtFprintfInt-4 317ns × (0.98,1.01) 315ns × (0.98,1.02) -0.92% (p=0.044)
BenchmarkFmtFprintfIntInt-4 527ns × (0.99,1.01) 525ns × (0.98,1.01) ~ (p=0.164)
BenchmarkFmtFprintfPrefixedInt-4 421ns × (0.98,1.03) 417ns × (0.99,1.02) ~ (p=0.092)
BenchmarkFmtFprintfFloat-4 623ns × (0.98,1.02) 618ns × (0.98,1.03) ~ (p=0.172)
BenchmarkFmtManyArgs-4 2.09µs × (0.98,1.02) 2.09µs × (0.98,1.02) ~ (p=0.679)
BenchmarkGobDecode-4 18.6ms × (0.99,1.01) 18.6ms × (0.98,1.03) ~ (p=0.595)
BenchmarkGobEncode-4 15.0ms × (0.98,1.02) 15.1ms × (0.99,1.01) ~ (p=0.301)
BenchmarkGzip-4 659ms × (0.98,1.04) 660ms × (0.97,1.02) ~ (p=0.724)
BenchmarkGunzip-4 145ms × (0.98,1.04) 144ms × (0.99,1.04) ~ (p=0.671)
BenchmarkHTTPClientServer-4 139µs × (0.97,1.02) 138µs × (0.99,1.02) ~ (p=0.392)
BenchmarkJSONEncode-4 35.0ms × (0.99,1.02) 35.1ms × (0.98,1.02) ~ (p=0.777)
BenchmarkJSONDecode-4 119ms × (0.98,1.01) 118ms × (0.98,1.02) ~ (p=0.710)
BenchmarkMandelbrot200-4 6.02ms × (1.00,1.00) 6.02ms × (1.00,1.00) ~ (p=0.289)
BenchmarkGoParse-4 7.96ms × (0.99,1.01) 7.96ms × (0.99,1.01) ~ (p=0.884)
BenchmarkRegexpMatchEasy0_32-4 164ns × (0.98,1.04) 166ns × (0.97,1.04) ~ (p=0.221)
BenchmarkRegexpMatchEasy0_1K-4 540ns × (0.99,1.01) 552ns × (0.97,1.04) +2.10% (p=0.018)
BenchmarkRegexpMatchEasy1_32-4 140ns × (0.99,1.04) 142ns × (0.97,1.04) ~ (p=0.226)
BenchmarkRegexpMatchEasy1_1K-4 896ns × (0.99,1.03) 907ns × (0.97,1.04) ~ (p=0.155)
BenchmarkRegexpMatchMedium_32-4 255ns × (0.99,1.04) 255ns × (0.98,1.04) ~ (p=0.904)
BenchmarkRegexpMatchMedium_1K-4 73.4µs × (0.99,1.04) 73.8µs × (0.98,1.04) ~ (p=0.560)
BenchmarkRegexpMatchHard_32-4 3.93µs × (0.98,1.04) 3.95µs × (0.98,1.04) ~ (p=0.571)
BenchmarkRegexpMatchHard_1K-4 117µs × (1.00,1.01) 119µs × (0.98,1.04) +1.48% (p=0.048)
BenchmarkRevcomp-4 990ms × (0.94,1.08) 989ms × (0.94,1.10) ~ (p=0.957)
BenchmarkTemplate-4 137ms × (0.98,1.02) 137ms × (0.99,1.01) ~ (p=0.996)
BenchmarkTimeParse-4 629ns × (1.00,1.00) 629ns × (0.99,1.01) ~ (p=0.924)
BenchmarkTimeFormat-4 710ns × (0.99,1.01) 716ns × (0.98,1.02) +0.84% (p=0.033)
Change-Id: I43a04e0f6ad5e3ba9847dddf12e13222561f9cf4
Reviewed-on: https://go-review.googlesource.com/9543
Reviewed-by: Austin Clements <austin@google.com>
2015-04-29 22:17:09 -06:00
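The commit's claim that only the fractional worker is affected can be made concrete with a little arithmetic. Assuming the 25% goal is simply split into whole dedicated workers plus a fractional share of one P (the controller's exact rounding may differ), the split by GOMAXPROCS is:

package main

import "fmt"

const gcGoalUtilization = 0.25

func main() {
	for _, procs := range []int{1, 2, 4, 8} {
		// Whole Ps that can run a dedicated mark worker all the time.
		dedicated := int(gcGoalUtilization * float64(procs))
		// Leftover share of one P, covered by the fractional worker.
		fractional := gcGoalUtilization*float64(procs) - float64(dedicated)
		fmt.Printf("GOMAXPROCS=%d: %d dedicated, fractional share %.2f\n",
			procs, dedicated, fractional)
	}
}

At GOMAXPROCS=1 and 2 the whole goal is carried by the fractional worker, while at 4 and 8 it is covered entirely by dedicated workers, which matches the note above that the change matters most at 1, a bit at 2, and not at all at 4.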
|
|
|
// This P has picked the token for the fractional worker.
|
|
|
|
// Is the GC currently under or at the utilization goal?
|
|
|
|
// If so, do more work.
|
2015-04-15 15:01:30 -06:00
|
|
|
//
|
2015-04-29 22:17:09 -06:00
|
|
|
// We used to check whether doing one time slice of work
|
|
|
|
// would remain under the utilization goal, but that has the
|
|
|
|
// effect of delaying work until the mutator has run for
|
|
|
|
// enough time slices to pay for the work. During those time
|
|
|
|
// slices, write barriers are enabled, so the mutator is running slower.
|
|
|
|
// Now instead we do the work whenever we're under or at the
|
|
|
|
// utilization goal and pay for it by letting the mutator run later.
|
|
|
|
// This doesn't change the overall utilization averages, but it
|
|
|
|
// front loads the GC work so that the GC finishes earlier and
|
|
|
|
// write barriers can be turned off sooner, effectively giving
|
|
|
|
// the mutator a faster machine.
|
|
|
|
//
|
|
|
|
// The old, slower behavior can be restored by setting
|
|
|
|
// gcForcePreemptNS = forcePreemptNS.
|
|
|
|
const gcForcePreemptNS = 0
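To see what the constant changes, plug illustrative numbers into the check that follows below (then, timeUsed, fractionalUtilizationGoal). The 10ms slice length and the 0.25 goal here are assumptions chosen for the example, not values read from the runtime:

package main

import "fmt"

const (
	fractionalUtilizationGoal = 0.25 // illustrative goal
	forcePreemptNS            = 10e6 // assume a 10ms time slice for the example
)

// wouldRun reports whether the fractional worker would be scheduled, given how
// long the cycle has been running, how much fractional mark time has been used,
// and the slack added to both sides of the check (the role of gcForcePreemptNS).
func wouldRun(nowNS, fractionalMarkTimeNS, slackNS int64) bool {
	then := nowNS + slackNS
	timeUsed := fractionalMarkTimeNS + slackNS
	return !(then > 0 && float64(timeUsed)/float64(then) > fractionalUtilizationGoal)
}

func main() {
	now, used := int64(40e6), int64(9e6) // 40ms into the cycle, 9ms of fractional work so far
	fmt.Println(wouldRun(now, used, 0))              // new rule: 9/40 = 0.225 <= 0.25, so run now
	fmt.Println(wouldRun(now, used, forcePreemptNS)) // old rule: 19/50 = 0.38 > 0.25, so wait
}

With gcForcePreemptNS set back to a full slice, the worker holds off until the mutator has already banked enough time, which is exactly the delay the commit message describes.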
|
|
|
|
|
2015-04-15 15:01:30 -06:00
|
|
|
// TODO(austin): We could fast path this and basically
|
2015-04-23 16:55:23 -06:00
|
|
|
// eliminate contention on c.fractionalMarkWorkersNeeded by
|
2015-04-15 15:01:30 -06:00
|
|
|
// precomputing the minimum time at which it's worth
|
|
|
|
// next scheduling the fractional worker. Then Ps
|
|
|
|
// don't have to fight in the window where we've
|
|
|
|
// passed that deadline and no one has started the
|
|
|
|
// worker yet.
|
runtime: multi-threaded, utilization-scheduled background mark
Currently, the concurrent mark phase is performed by the main GC
goroutine. Prior to the previous commit enabling preemption, this
caused marking to always consume 1/GOMAXPROCS of the available CPU
time. If GOMAXPROCS=1, this meant background GC would consume 100% of
the CPU (effectively a STW). If GOMAXPROCS>4, background GC would use
less than the goal of 25%. If GOMAXPROCS=4, background GC would use
the goal 25%, but if the mutator wasn't using the remaining 75%,
background marking wouldn't take advantage of the idle time. Enabling
preemption in the previous commit made GC miss CPU targets in
completely different ways, but set us up to bring everything back in
line.
This change replaces the fixed GC goroutine with per-P background mark
goroutines. Once started, these goroutines don't go in the standard
run queues; instead, they are scheduled specially such that the time
spent in mutator assists and the background mark goroutines totals 25%
of the CPU time available to the program. Furthermore, this lets
background marking take advantage of idle Ps, which significantly
boosts GC performance for applications that under-utilize the CPU.
This also requires changing how time is reported for gctrace, so this
change splits the concurrent mark CPU time into assist/background/idle
scanning.
This also requires increasing the size of the StackRecord slice used
in a GoroutineProfile test.
Change-Id: I0936ff907d2cee6cb687a208f2df47e8988e3157
Reviewed-on: https://go-review.googlesource.com/8850
Reviewed-by: Rick Hudson <rlh@golang.org>
2015-03-23 19:07:33 -06:00
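The arithmetic behind the commit's opening paragraph is simple enough to spell out: a single always-running mark goroutine gets 1/GOMAXPROCS of the CPU, which only coincides with the 25% goal at GOMAXPROCS=4. A tiny illustration of that comparison:

package main

import "fmt"

func main() {
	// Old scheme: one mark goroutine competing like any other goroutine, so
	// background GC consumed 1/GOMAXPROCS of the CPU regardless of the goal.
	for _, procs := range []int{1, 2, 4, 8} {
		share := 100.0 / float64(procs)
		fmt.Printf("GOMAXPROCS=%d: old share %5.1f%%, goal 25.0%%\n", procs, share)
	}
}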
|
|
|
//
|
2015-04-15 15:01:30 -06:00
|
|
|
// TODO(austin): Shorter preemption interval for mark
|
|
|
|
// worker to improve fairness and give this
|
|
|
|
// finer-grained control over schedule?
|
|
|
|
now := nanotime() - gcController.bgMarkStartTime
|
2015-04-29 22:17:09 -06:00
|
|
|
then := now + gcForcePreemptNS
|
|
|
|
timeUsed := c.fractionalMarkTime + gcForcePreemptNS
|
|
|
|
if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
|
2015-04-15 15:01:30 -06:00
|
|
|
// Nope, we'd overshoot the utilization goal
|
|
|
|
xaddint64(&c.fractionalMarkWorkersNeeded, +1)
|
|
|
|
return nil
|
2015-03-23 19:07:33 -06:00
|
|
|
}
|
2015-04-15 15:01:30 -06:00
|
|
|
_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
|
2015-03-23 19:07:33 -06:00
|
|
|
}
|
|
|
|
|
2015-04-15 15:01:30 -06:00
|
|
|
// Run the background mark worker
|
|
|
|
gp := _p_.gcBgMarkWorker
|
|
|
|
casgstatus(gp, _Gwaiting, _Grunnable)
|
|
|
|
if trace.enabled {
|
|
|
|
traceGoUnpark(gp, 0)
|
|
|
|
}
|
|
|
|
return gp
|
2015-03-12 10:08:47 -06:00
|
|
|
}
|
|
|
|
|
2015-04-15 15:01:30 -06:00
|
|
|
// gcGoalUtilization is the goal CPU utilization for background
|
|
|
|
// marking as a fraction of GOMAXPROCS.
|
2015-03-23 19:07:33 -06:00
|
|
|
const gcGoalUtilization = 0.25
|
|
|
|
|
2015-03-13 11:29:23 -06:00
|
|
|
// gcBgCreditSlack is the amount of scan work credit background
|
|
|
|
// scanning can accumulate locally before updating
|
|
|
|
// gcController.bgScanCredit. Lower values give mutator assists more
|
|
|
|
// accurate accounting of background scanning. Higher values reduce
|
|
|
|
// memory contention.
|
|
|
|
const gcBgCreditSlack = 2000
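The slack constant above exists so that background scanning does not hammer a single shared counter on every unit of work. A minimal sketch of that batching pattern, using sync/atomic and invented names (sharedCredit stands in for gcController.bgScanCredit):

package main

import (
	"fmt"
	"sync/atomic"
)

const creditSlack = 2000 // flush threshold, mirroring gcBgCreditSlack

var sharedCredit int64 // stands in for gcController.bgScanCredit

// worker accumulates credit locally and flushes it in batches, so the shared
// counter is touched far less often than once per unit of work.
func worker(units, creditPerUnit int64) {
	var local int64
	for i := int64(0); i < units; i++ {
		local += creditPerUnit // cheap, uncontended
		if local > creditSlack {
			atomic.AddInt64(&sharedCredit, local) // occasional contended update
			local = 0
		}
	}
	if local > 0 {
		atomic.AddInt64(&sharedCredit, local) // publish the remainder at the end
	}
}

func main() {
	worker(10000, 1)
	fmt.Println("total credit published:", atomic.LoadInt64(&sharedCredit)) // 10000
}

Lowering the threshold makes the shared counter track reality more closely, which helps assists, at the price of more contended atomic adds.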
|
|
|
|
|
2015-03-17 10:17:47 -06:00
|
|
|
// gcAssistTimeSlack is the nanoseconds of mutator assist time that
|
|
|
|
// can accumulate on a P before updating gcController.assistTime.
|
|
|
|
const gcAssistTimeSlack = 5000
|
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
// Determine whether to initiate a GC.
|
|
|
|
// If the GC is already working, there is no need to trigger another one.
|
|
|
|
// This should establish a feedback loop where if the GC does not
|
|
|
|
// have sufficient time to complete, then more memory will be
|
|
|
|
// requested from the OS, increasing the heap size and thus allowing future
|
|
|
|
// GCs more time to complete.
|
runtime: introduce heap_live; replace use of heap_alloc in GC
Currently there are two main consumers of memstats.heap_alloc:
updatememstats (aka ReadMemStats) and shouldtriggergc.
updatememstats recomputes heap_alloc from the ground up, so we don't
need to keep heap_alloc up to date for it. shouldtriggergc wants to
know how many bytes were marked by the previous GC plus how many bytes
have been allocated since then, but this *isn't* what heap_alloc
tracks. heap_alloc also includes objects that are not marked and
haven't yet been swept.
Introduce a new memstat called heap_live that actually tracks what
shouldtriggergc wants to know and stop keeping heap_alloc up to date.
Unlike heap_alloc, heap_live follows a simple sawtooth that drops
during each mark termination and increases monotonically between GCs.
heap_alloc, on the other hand, has much more complicated behavior: it
may drop during sweep termination, slowly decreases from background
sweeping between GCs, is roughly unaffected by allocation as long as
there are unswept spans (because we sweep and allocate at the same
rate), and may go up after background sweeping is done depending on
the GC trigger.
heap_live simplifies computing next_gc and using it to figure out when
to trigger garbage collection. Currently, we guess next_gc at the end
of a cycle and update it as we sweep and get a better idea of how much
heap was marked. Now, since we're directly tracking how much heap is
marked, we can directly compute next_gc.
This also corrects bugs that could cause us to trigger GC early.
Currently, in any case where sweep termination actually finds spans to
sweep, heap_alloc is an overestimation of live heap, so we'll trigger
GC too early. heap_live, on the other hand, is unaffected by sweeping.
Change-Id: I1f96807b6ed60d4156e8173a8e68745ffc742388
Reviewed-on: https://go-review.googlesource.com/8389
Reviewed-by: Russ Cox <rsc@golang.org>
2015-03-30 16:01:32 -06:00
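With a direct count of marked bytes in hand, the next trigger point follows from a single formula. A minimal sketch, assuming the usual GOGC rule of thumb (grow the live heap by GOGC percent); the real controller also applies a minimum heap size and further adjustments:

package main

import "fmt"

// nextGC returns the heap size at which the next collection should run, given
// the bytes found reachable (marked) by the last cycle and the GOGC growth
// percentage.
func nextGC(marked uint64, gogc int) uint64 {
	return marked + marked*uint64(gogc)/100
}

func main() {
	marked := uint64(100 << 20)            // say the last cycle marked 100 MiB
	fmt.Println(nextGC(marked, 100) >> 20) // 200 (MiB): trigger when heap_live reaches this
}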
|
|
|
// The memstats.heap_live read has a benign race.
|
2015-02-19 11:38:46 -07:00
|
|
|
// A false negative simply does not start a GC; a false positive
|
|
|
|
// will start a GC needlessly. Neither has correctness issues.
|
|
|
|
func shouldtriggergc() bool {
|
2015-03-31 09:44:11 -06:00
|
|
|
return memstats.heap_live >= memstats.next_gc && atomicloaduint(&bggc.working) == 0
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
2015-02-19 13:48:40 -07:00
|
|
|
var work struct {
|
2015-02-19 11:38:46 -07:00
|
|
|
full uint64 // lock-free list of full blocks workbuf
|
|
|
|
empty uint64 // lock-free list of empty blocks workbuf
|
|
|
|
partial uint64 // lock-free list of partially filled blocks workbuf
|
|
|
|
pad0 [_CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
|
|
|
|
nproc uint32
|
|
|
|
tstart int64
|
|
|
|
nwait uint32
|
|
|
|
ndone uint32
|
|
|
|
alldone note
|
|
|
|
markfor *parfor
|
2014-11-15 06:00:38 -07:00
|
|
|
|
2015-03-23 19:07:33 -06:00
|
|
|
bgMarkReady note // signal background mark worker has started
|
|
|
|
bgMarkDone uint32 // cas to 1 when at a background mark completion point
|
2015-04-22 15:44:36 -06:00
|
|
|
|
|
|
|
// Background mark completion signaling
|
|
|
|
bgMarkWake struct {
|
|
|
|
lock mutex
|
|
|
|
g *g
|
|
|
|
wake bool
|
|
|
|
}
|
2015-03-23 19:07:33 -06:00
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
// Copy of mheap.allspans for marker or sweeper.
|
|
|
|
spans []*mspan
|
2015-04-01 11:47:35 -06:00
|
|
|
|
|
|
|
// totaltime is the CPU nanoseconds spent in GC since the
|
|
|
|
// program started if debug.gctrace > 0.
|
|
|
|
totaltime int64
|
2015-03-12 14:53:57 -06:00
|
|
|
|
|
|
|
// bytesMarked is the number of bytes marked this cycle. This
|
|
|
|
// includes bytes blackened in scanned objects, noscan objects
|
|
|
|
// that go straight to black, and permagrey objects scanned by
|
|
|
|
// markroot during the concurrent scan phase. This is updated
|
|
|
|
// atomically during the cycle. Updates may be batched
|
|
|
|
// arbitrarily, since the value is only read at the end of the
|
|
|
|
// cycle.
|
|
|
|
//
|
|
|
|
// Because of benign races during marking, this number may not
|
|
|
|
// be the exact number of marked bytes, but it should be very
|
|
|
|
// close.
|
|
|
|
bytesMarked uint64
|
runtime: fix underflow in next_gc calculation
Currently, it's possible for the next_gc calculation to underflow.
Since next_gc is unsigned, this wraps around and effectively disables
GC for the rest of the program's execution. Besides being obviously
wrong, this is causing test failures on 32-bit because some tests are
running out of heap.
This underflow happens for two reasons, both having to do with how we
estimate the reachable heap size at the end of the GC cycle.
One reason is that this calculation depends on the value of heap_live
at the beginning of the GC cycle, but we currently only record that
value during a concurrent GC and not during a forced STW GC. Fix this
by moving the recorded value from gcController to work and recording
it on a common code path.
The other reason is that we use the amount of allocation during the GC
cycle as an approximation of the amount of floating garbage and
subtract it from the marked heap to estimate the reachable heap.
However, since this is only an approximation, it's possible for the
amount of allocation during the cycle to be *larger* than the marked
heap size (since the runtime allocates white and it's possible for
these allocations to never be made reachable from the heap). Currently
this causes wrap-around in our estimate of the reachable heap size,
which in turn causes wrap-around in next_gc. Fix this by bottoming out
the reachable heap estimate at 0, in which case we just fall back to
triggering GC at heapminimum (which is okay since this only happens on
small heaps).
Fixes #10555, fixes #10556, and fixes #10559.
Change-Id: Iad07b529c03772356fede2ae557732f13ebfdb63
Reviewed-on: https://go-review.googlesource.com/9286
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
2015-04-23 11:02:31 -06:00
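The heart of the fix is a clamped subtraction plus a floor on the trigger. A sketch with hypothetical names (estimateReachable, nextGC, and the heapMinimum value are assumptions made for the example):

package main

import "fmt"

const heapMinimum = 4 << 20 // assumed floor for the next trigger, for illustration

// estimateReachable approximates the reachable heap as the marked bytes minus
// the allocation that happened during the cycle (treated as floating garbage).
// Because the approximation can exceed the marked bytes, clamp at zero instead
// of letting the unsigned subtraction wrap around.
func estimateReachable(bytesMarked, allocatedDuringCycle uint64) uint64 {
	if allocatedDuringCycle >= bytesMarked {
		return 0
	}
	return bytesMarked - allocatedDuringCycle
}

func nextGC(reachable uint64, gogc int) uint64 {
	next := reachable + reachable*uint64(gogc)/100
	if next < heapMinimum {
		next = heapMinimum // small heaps fall back to the minimum trigger
	}
	return next
}

func main() {
	// A tiny heap where the cycle allocated more than it marked: without the
	// clamp, the subtraction would wrap to a huge value and disable GC.
	fmt.Println(nextGC(estimateReachable(1<<20, 3<<20), 100)) // 4194304 (heapMinimum)
}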
|
|
|
|
|
|
|
// initialHeapLive is the value of memstats.heap_live at the
|
|
|
|
// beginning of this GC cycle.
|
|
|
|
initialHeapLive uint64
|
2015-01-06 12:58:49 -07:00
|
|
|
}
|
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
// GC runs a garbage collection.
|
|
|
|
func GC() {
|
2015-02-19 13:48:40 -07:00
|
|
|
startGC(gcForceBlockMode)
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
2015-02-19 13:48:40 -07:00
|
|
|
const (
|
|
|
|
gcBackgroundMode = iota // concurrent GC
|
|
|
|
gcForceMode // stop-the-world GC now
|
|
|
|
gcForceBlockMode // stop-the-world GC now and wait for sweep
|
|
|
|
)
|
|
|
|
|
|
|
|
func startGC(mode int) {
|
2015-02-19 11:38:46 -07:00
|
|
|
// The gc is turned off (via enablegc) until the bootstrap has completed.
|
|
|
|
// Also, malloc gets called in the guts of a number of libraries that might be
|
|
|
|
// holding locks. To avoid deadlocks during stoptheworld, don't bother
|
|
|
|
// trying to run gc while holding a lock. The next mallocgc without a lock
|
|
|
|
// will do the gc instead.
|
|
|
|
mp := acquirem()
|
2015-05-15 14:03:27 -06:00
|
|
|
if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
|
2015-02-19 11:38:46 -07:00
|
|
|
releasem(mp)
|
2014-11-11 15:05:02 -07:00
|
|
|
return
|
|
|
|
}
|
2015-02-19 11:38:46 -07:00
|
|
|
releasem(mp)
|
|
|
|
mp = nil
|
2014-11-20 10:08:13 -07:00
|
|
|
|
2015-02-19 13:48:40 -07:00
|
|
|
if mode != gcBackgroundMode {
|
|
|
|
// special synchronous cases
|
|
|
|
gc(mode)
|
|
|
|
return
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
2015-02-19 13:48:40 -07:00
|
|
|
|
|
|
|
// trigger concurrent GC
|
2015-04-22 13:05:08 -06:00
|
|
|
readied := false
|
2015-02-19 13:48:40 -07:00
|
|
|
lock(&bggc.lock)
|
|
|
|
if !bggc.started {
|
|
|
|
bggc.working = 1
|
|
|
|
bggc.started = true
|
2015-04-22 13:05:08 -06:00
|
|
|
readied = true
|
2015-02-19 13:48:40 -07:00
|
|
|
go backgroundgc()
|
|
|
|
} else if bggc.working == 0 {
|
|
|
|
bggc.working = 1
|
2015-04-22 13:05:08 -06:00
|
|
|
readied = true
|
|
|
|
ready(bggc.g, 0)
|
2015-02-19 13:48:40 -07:00
|
|
|
}
|
|
|
|
unlock(&bggc.lock)
|
2015-04-22 13:05:08 -06:00
|
|
|
if readied {
|
|
|
|
// This G just started or ready()d the GC goroutine.
|
|
|
|
// Switch directly to it by yielding.
|
|
|
|
Gosched()
|
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
}
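The bggc dance above is a start-once, wake-thereafter pattern: the first trigger launches the goroutine, later triggers only wake it when it is idle, and triggers that arrive mid-cycle coalesce into nothing. Translated into ordinary channels and mutexes (the runtime itself uses goparkunlock and ready rather than channels; all names here are invented for the sketch):

package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	mu      sync.Mutex
	started bool
	working bool
	wake    = make(chan struct{}, 1)
)

// trigger starts the worker on first use, wakes it if it is idle, and does
// nothing if it is already running a cycle.
func trigger() {
	mu.Lock()
	defer mu.Unlock()
	if !started {
		started = true
		working = true
		go backgroundWorker()
		return
	}
	if !working {
		working = true
		wake <- struct{}{} // wake the idle worker
	}
	// Otherwise the worker is already mid-cycle; the trigger coalesces.
}

func backgroundWorker() {
	for {
		fmt.Println("running one background cycle")
		time.Sleep(10 * time.Millisecond) // stands in for the actual work
		mu.Lock()
		working = false
		mu.Unlock()
		<-wake // park until the next trigger
	}
}

func main() {
	trigger() // starts the worker
	trigger() // coalesces: worker is already busy
	time.Sleep(50 * time.Millisecond)
	trigger() // wakes the now-idle worker
	time.Sleep(50 * time.Millisecond)
}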
|
|
|
|
|
2015-01-06 12:58:49 -07:00
|
|
|
// State of the background concurrent GC goroutine.
|
|
|
|
var bggc struct {
|
|
|
|
lock mutex
|
|
|
|
g *g
|
|
|
|
working uint
|
|
|
|
started bool
|
|
|
|
}
|
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
// backgroundgc is running in a goroutine and does the concurrent GC work.
|
|
|
|
// bggc holds the state of the backgroundgc.
|
|
|
|
func backgroundgc() {
|
|
|
|
bggc.g = getg()
|
2014-11-11 15:05:02 -07:00
|
|
|
for {
|
2015-02-19 13:48:40 -07:00
|
|
|
gc(gcBackgroundMode)
|
2015-02-19 11:38:46 -07:00
|
|
|
lock(&bggc.lock)
|
|
|
|
bggc.working = 0
|
2015-02-21 11:01:40 -07:00
|
|
|
goparkunlock(&bggc.lock, "Concurrent GC wait", traceEvGoBlock, 1)
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-19 13:48:40 -07:00
|
|
|
func gc(mode int) {
|
2015-03-26 16:48:42 -06:00
|
|
|
// debug.gctrace variables
|
|
|
|
var stwprocs, maxprocs int32
|
|
|
|
var tSweepTerm, tScan, tInstallWB, tMark, tMarkTerm int64
|
2015-04-21 14:38:38 -06:00
|
|
|
var heap0, heap1, heap2, heapGoal uint64
|
2015-03-26 16:48:42 -06:00
|
|
|
|
2015-02-19 13:48:40 -07:00
|
|
|
// Ok, we're doing it! Stop everybody else
|
2015-02-19 11:38:46 -07:00
|
|
|
semacquire(&worldsema, false)
|
2014-12-12 10:41:57 -07:00
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
// Pick up the remaining unswept/not being swept spans concurrently
|
2015-04-03 13:27:44 -06:00
|
|
|
//
|
runtime: finish sweeping before concurrent GC starts
Currently, the concurrent sweep follows a 1:1 rule: when allocation
needs a span, it sweeps a span (likewise, when a large allocation
needs N pages, it sweeps until it frees N pages). This rule worked
well for the STW collector (especially when GOGC==100) because it did
no more sweeping than necessary to keep the heap from growing, would
generally finish sweeping just before GC, and ensured good temporal
locality between sweeping a page and allocating from it.
It doesn't work well with concurrent GC. Since concurrent GC requires
starting GC earlier (sometimes much earlier), the sweep often won't be
done when GC starts. Unfortunately, the first thing GC has to do is
finish the sweep. In the meantime, the mutator can continue
allocating, pushing the heap size even closer to the goal size. This
worked okay with the 7/8ths trigger, but it gets into a vicious cycle
with the GC trigger controller: if the mutator is allocating quickly
and driving the trigger lower, more and more sweep work will be left
to GC; this both causes GC to take longer (allowing the mutator to
allocate more during GC) and delays the start of the concurrent mark
phase, which throws off the GC controller's statistics and generally
causes it to push the trigger even lower.
As an example of a particularly bad case, the garbage benchmark with
GOMAXPROCS=4 and -benchmem 512 (MB) spends the first 0.4-0.8 seconds
of each GC cycle sweeping, during which the heap grows by between
109MB and 252MB.
To fix this, this change replaces the 1:1 sweep rule with a
proportional sweep rule. At the end of GC, GC knows exactly how much
heap allocation will occur before the next concurrent GC as well as
how many span pages must be swept. This change computes this "sweep
ratio" and when the mallocgc asks for a span, the mcentral sweeps
enough spans to bring the swept span count into ratio with the
allocated byte count.
On the benchmark from above, this entirely eliminates sweeping at the
beginning of GC, which reduces the time between startGC readying the
GC goroutine and GC stopping the world for sweep termination to ~100µs
during which the heap grows at most 134KB.
Change-Id: I35422d6bba0c2310d48bb1f8f30a72d29e98c1af
Reviewed-on: https://go-review.googlesource.com/8921
Reviewed-by: Rick Hudson <rlh@golang.org>
2015-04-13 21:34:57 -06:00
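The "sweep ratio" described above can be sketched with made-up numbers: divide the pages left to sweep by the bytes that may be allocated before the next cycle, then sweep just enough on each allocation to stay on that line.

package main

import "fmt"

func main() {
	const (
		pagesToSweep = 10000.0  // unswept span pages left after this cycle
		heapDistance = 64 << 20 // bytes that may be allocated before the next GC
	)
	sweepPagesPerByte := pagesToSweep / float64(heapDistance)

	sweptPages, allocated := 0.0, 0.0
	for i := 0; i < 4; i++ {
		allocated += 16 << 20 // the mutator allocates another 16 MiB
		// Sweep just enough to keep swept pages in ratio with allocated bytes.
		target := sweepPagesPerByte * allocated
		for sweptPages < target {
			sweptPages++ // stands in for sweeping one more span
		}
		fmt.Printf("allocated %3.0f MiB: %5.0f pages swept\n", allocated/(1<<20), sweptPages)
	}
}

By the time the mutator has allocated the full heap distance, every page has been swept, so the next cycle never starts with a sweep debt.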
|
|
|
// This shouldn't happen if we're being invoked in background
|
|
|
|
// mode since proportional sweep should have just finished
|
|
|
|
// sweeping everything, but rounding errors, etc., may leave a
|
|
|
|
// few spans unswept. In forced mode, this is necessary since
|
|
|
|
// GC can be forced at any point in the sweeping cycle.
|
2015-02-19 11:38:46 -07:00
|
|
|
for gosweepone() != ^uintptr(0) {
|
|
|
|
sweep.nbgsweep++
|
2014-11-15 06:00:38 -07:00
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-02-19 13:48:40 -07:00
|
|
|
if mode == gcBackgroundMode {
|
2015-03-23 19:07:33 -06:00
|
|
|
gcBgMarkStartWorkers()
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
2015-03-26 16:48:42 -06:00
|
|
|
if debug.gctrace > 0 {
|
|
|
|
stwprocs, maxprocs = gcprocs(), gomaxprocs
|
|
|
|
tSweepTerm = nanotime()
|
2015-04-04 10:27:46 -06:00
|
|
|
if mode == gcBackgroundMode {
|
|
|
|
// We started GC when heap_live == next_gc,
|
|
|
|
// but the mutator may have allocated between
|
|
|
|
// then and now. Report heap when GC started.
|
|
|
|
heap0 = memstats.next_gc
|
|
|
|
} else {
|
|
|
|
heap0 = memstats.heap_live
|
|
|
|
}
|
2015-03-26 16:48:42 -06:00
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
if trace.enabled {
|
|
|
|
traceGCStart()
|
|
|
|
}
|
|
|
|
|
|
|
|
systemstack(stoptheworld)
|
|
|
|
systemstack(finishsweep_m) // finish sweep before we start concurrent scan.
|
2015-03-05 15:33:08 -07:00
|
|
|
// clearpools before we start the GC. If we wait, the memory will not be
|
|
|
|
// reclaimed until the next GC cycle.
|
|
|
|
clearpools()
|
2015-02-19 13:48:40 -07:00
|
|
|
|
2015-03-12 14:53:57 -06:00
|
|
|
work.bytesMarked = 0
|
2015-04-23 11:02:31 -06:00
|
|
|
work.initialHeapLive = memstats.heap_live
|
2015-03-12 14:53:57 -06:00
|
|
|
|
2015-02-19 13:48:40 -07:00
|
|
|
if mode == gcBackgroundMode { // Do as much work concurrently as possible
|
2015-03-12 10:08:47 -06:00
|
|
|
gcController.startCycle()
|
2015-04-21 14:38:38 -06:00
|
|
|
heapGoal = gcController.heapGoal
|
2015-03-12 10:08:47 -06:00
|
|
|
|
2015-02-19 13:48:40 -07:00
|
|
|
systemstack(func() {
|
runtime: replace needwb() with writeBarrierEnabled
Reduce the write barrier check to a single load and compare
so that it can be inlined into write barrier use sites.
Makes the standard write barrier a little faster too.
name old new delta
BenchmarkBinaryTree17 17.9s × (0.99,1.01) 17.9s × (1.00,1.01) ~
BenchmarkFannkuch11 4.35s × (1.00,1.00) 4.43s × (1.00,1.00) +1.81%
BenchmarkFmtFprintfEmpty 120ns × (0.93,1.06) 110ns × (1.00,1.06) -7.92%
BenchmarkFmtFprintfString 479ns × (0.99,1.00) 487ns × (0.99,1.00) +1.67%
BenchmarkFmtFprintfInt 452ns × (0.99,1.02) 450ns × (0.99,1.00) ~
BenchmarkFmtFprintfIntInt 766ns × (0.99,1.01) 762ns × (1.00,1.00) ~
BenchmarkFmtFprintfPrefixedInt 576ns × (0.98,1.01) 584ns × (0.99,1.01) ~
BenchmarkFmtFprintfFloat 730ns × (1.00,1.01) 738ns × (1.00,1.00) +1.16%
BenchmarkFmtManyArgs 2.84µs × (0.99,1.00) 2.80µs × (1.00,1.01) -1.22%
BenchmarkGobDecode 39.3ms × (0.98,1.01) 39.0ms × (0.99,1.00) ~
BenchmarkGobEncode 39.5ms × (0.99,1.01) 37.8ms × (0.98,1.01) -4.33%
BenchmarkGzip 663ms × (1.00,1.01) 661ms × (0.99,1.01) ~
BenchmarkGunzip 143ms × (1.00,1.00) 142ms × (1.00,1.00) ~
BenchmarkHTTPClientServer 132µs × (0.99,1.01) 132µs × (0.99,1.01) ~
BenchmarkJSONEncode 57.4ms × (0.99,1.01) 56.3ms × (0.99,1.01) -1.96%
BenchmarkJSONDecode 139ms × (0.99,1.00) 138ms × (0.99,1.01) ~
BenchmarkMandelbrot200 6.03ms × (1.00,1.00) 6.01ms × (1.00,1.00) ~
BenchmarkGoParse 10.3ms × (0.89,1.14) 10.2ms × (0.87,1.05) ~
BenchmarkRegexpMatchEasy0_32 209ns × (1.00,1.00) 208ns × (1.00,1.00) ~
BenchmarkRegexpMatchEasy0_1K 591ns × (0.99,1.00) 588ns × (1.00,1.00) ~
BenchmarkRegexpMatchEasy1_32 184ns × (0.99,1.02) 182ns × (0.99,1.01) ~
BenchmarkRegexpMatchEasy1_1K 1.01µs × (1.00,1.00) 0.99µs × (1.00,1.01) -2.33%
BenchmarkRegexpMatchMedium_32 330ns × (1.00,1.00) 323ns × (1.00,1.01) -2.12%
BenchmarkRegexpMatchMedium_1K 92.6µs × (1.00,1.00) 89.9µs × (1.00,1.00) -2.92%
BenchmarkRegexpMatchHard_32 4.80µs × (0.95,1.00) 4.72µs × (0.95,1.01) ~
BenchmarkRegexpMatchHard_1K 136µs × (1.00,1.00) 133µs × (1.00,1.01) -1.86%
BenchmarkRevcomp 900ms × (0.99,1.04) 900ms × (1.00,1.05) ~
BenchmarkTemplate 172ms × (1.00,1.00) 168ms × (0.99,1.01) -2.07%
BenchmarkTimeParse 637ns × (1.00,1.00) 637ns × (1.00,1.00) ~
BenchmarkTimeFormat 744ns × (1.00,1.01) 738ns × (1.00,1.00) -0.67%
Change-Id: I4ecc925805da1f5ee264377f1f7574f54ee575e7
Reviewed-on: https://go-review.googlesource.com/9321
Reviewed-by: Austin Clements <austin@google.com>
2015-04-24 12:00:55 -06:00
|
|
|
setGCPhase(_GCscan)
|
2015-02-19 13:48:40 -07:00
|
|
|
|
|
|
|
// Concurrent scan.
|
|
|
|
starttheworld()
|
2015-03-26 16:48:42 -06:00
|
|
|
if debug.gctrace > 0 {
|
|
|
|
tScan = nanotime()
|
|
|
|
}
|
2015-02-19 13:48:40 -07:00
|
|
|
gcscan_m()
|
|
|
|
|
2015-03-27 15:01:53 -06:00
|
|
|
// Enter mark phase. This enables write
|
|
|
|
// barriers.
|
2015-03-26 16:48:42 -06:00
|
|
|
if debug.gctrace > 0 {
|
|
|
|
tInstallWB = nanotime()
|
|
|
|
}
|
2015-04-24 12:00:55 -06:00
|
|
|
			setGCPhase(_GCmark)
			// Ensure all Ps have observed the phase
			// change and have write barriers enabled
			// before any blackening occurs.
			forEachP(func(*p) {})
		})

		// Concurrent mark.
		gcBgMarkPrepare() // Must happen before assist enable.
		// At this point all Ps have enabled the mark phase
		// write barrier, thus maintaining the no white to
		// black invariant. Mutator assists and mark workers
		// can now be enabled to safely blacken grey objects.
		atomicstore(&gcBlackenEnabled, 1)
		if debug.gctrace > 0 {
			tMark = nanotime()
		}

		// Wait for background mark completion.
		lock(&work.bgMarkWake.lock)
		if work.bgMarkWake.wake {
			// Wakeup already happened
			unlock(&work.bgMarkWake.lock)
		} else {
			work.bgMarkWake.g = getg()
			goparkunlock(&work.bgMarkWake.lock, "mark wait (idle)", traceEvGoBlock, 1)
		}
		work.bgMarkWake.wake = false
		work.bgMarkWake.g = nil
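		// (gcBgMarkDone sets wake or readies this goroutine while holding
		// the same lock, so a completion signal cannot be lost whether it
		// arrives before or after the park above.)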

		// Begin mark termination.
		if debug.gctrace > 0 {
			tMarkTerm = nanotime()
		}
		systemstack(stoptheworld)
		// The gcphase is _GCmark; it will transition to _GCmarktermination
		// below. The important thing is that the wb remains active until
		// all marking is complete. This includes writes made by the GC.

		// Flush the gcWork caches. This must be done before
		// endCycle since endCycle depends on statistics kept
		// in these caches.
		gcFlushGCWork()

		gcController.endCycle()
	} else {
		// For non-concurrent GC (mode != gcBackgroundMode)
		// the g stacks have not been scanned, so clear g state
		// such that mark termination scans all stacks.
		gcResetGState()

		if debug.gctrace > 0 {
			t := nanotime()
			tScan, tInstallWB, tMark, tMarkTerm = t, t, t, t
			heapGoal = heap0
		}
	}

	// World is stopped.
	// Start marktermination which includes enabling the write barrier.
	atomicstore(&gcBlackenEnabled, 0)
	setGCPhase(_GCmarktermination)

	if debug.gctrace > 0 {
		heap1 = memstats.heap_live
	}

	startTime := nanotime()

	mp := acquirem()
	mp.preemptoff = "gcing"
	_g_ := getg()
	_g_.m.traceback = 2
	gp := _g_.m.curg
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "garbage collection"

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). We also
	// need to switch to g0 so we can shrink the stack.
	systemstack(func() {
		gcMark(startTime)
		if debug.gctrace > 0 {
			heap2 = work.bytesMarked
		}
		if debug.gccheckmark > 0 {
			// Run a full stop-the-world mark using checkmark bits,
			// to check that we didn't forget to mark anything during
			// the concurrent mark process.
			initCheckmarks()
			gcMark(startTime)
			clearCheckmarks()
		}

		// marking is complete so we can turn the write barrier off
		setGCPhase(_GCoff)
		gcSweep(mode)

		if debug.gctrace > 1 {
			startTime = nanotime()
			// The g stacks have been scanned so
			// they have gcscanvalid==true and gcworkdone==true.
			// Reset these so that all stacks will be rescanned.
			gcResetGState()
			finishsweep_m()

			// Still in STW but gcphase is _GCoff, reset to _GCmarktermination
			// At this point all objects will be found during the gcMark which
			// does a complete STW mark and object scan.
			setGCPhase(_GCmarktermination)
			gcMark(startTime)
			setGCPhase(_GCoff) // marking is done, turn off wb.
			gcSweep(mode)
		}
	})

	_g_.m.traceback = 0
	casgstatus(gp, _Gwaiting, _Grunning)

	if trace.enabled {
		traceGCDone()
	}

	// all done
	mp.preemptoff = ""

	semrelease(&worldsema)

	if gcphase != _GCoff {
		throw("gc done but gcphase != _GCoff")
	}

	systemstack(starttheworld)

	releasem(mp)
	mp = nil

	memstats.numgc++
	if debug.gctrace > 0 {
		tEnd := nanotime()

		// Update work.totaltime
		sweepTermCpu := int64(stwprocs) * (tScan - tSweepTerm)
		scanCpu := tInstallWB - tScan
		installWBCpu := int64(0)
		// We report idle marking time below, but omit it from
		// the overall utilization here since it's "free".
		markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
		markTermCpu := int64(stwprocs) * (tEnd - tMarkTerm)
		cycleCpu := sweepTermCpu + scanCpu + installWBCpu + markCpu + markTermCpu
		work.totaltime += cycleCpu

		// Compute overall utilization
		totalCpu := sched.totaltime + (tEnd-sched.procresizetime)*int64(gomaxprocs)
		util := work.totaltime * 100 / totalCpu
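
		// The gctrace line printed below has the shape (values illustrative):
		//   gc #4 @2.104s 1%: 0.6+1.2+0.1+3.0+0.9 ms clock, 2.4+1.2+0.1+0.8/2.4/3.0+3.6 ms cpu, 4->5->3 MB, 5 MB goal, 8 P
		// that is: sweep termination, scan, install write barrier, mark, and
		// mark termination clock times; the corresponding cpu times, with the
		// mark component split into assist/background/idle; heap sizes and
		// marked bytes; the heap goal; and the number of Ps. This output is
		// enabled with GODEBUG=gctrace=1.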

		var sbuf [24]byte
		printlock()
		print("gc #", memstats.numgc,
			" @", string(itoaDiv(sbuf[:], uint64(tEnd-runtimeInitTime)/1e6, 3)), "s ",
			util, "%: ",
			(tScan-tSweepTerm)/1e6,
			"+", (tInstallWB-tScan)/1e6,
			"+", (tMark-tInstallWB)/1e6,
			"+", (tMarkTerm-tMark)/1e6,
			"+", (tEnd-tMarkTerm)/1e6, " ms clock, ",
			sweepTermCpu/1e6,
			"+", scanCpu/1e6,
			"+", installWBCpu/1e6,
			"+", gcController.assistTime/1e6,
			"/", (gcController.dedicatedMarkTime+gcController.fractionalMarkTime)/1e6,
			"/", gcController.idleMarkTime/1e6,
			"+", markTermCpu/1e6, " ms cpu, ",
			heap0>>20, "->", heap1>>20, "->", heap2>>20, " MB, ",
			heapGoal>>20, " MB goal, ",
			maxprocs, " P")
		if mode != gcBackgroundMode {
			print(" (forced)")
		}
		print("\n")
		printunlock()
	}
	sweep.nbgsweep = 0
	sweep.npausesweep = 0

	// now that gc is done, kick off finalizer thread if needed
	if !concurrentSweep {
		// give the queued finalizers, if any, a chance to run
		Gosched()
	}
}

// gcBgMarkStartWorkers prepares background mark worker goroutines.
// These goroutines will not run until the mark phase, but they must
// be started while the work is not stopped and from a regular G
// stack. The caller must hold worldsema.
func gcBgMarkStartWorkers() {
	// Background marking is performed by per-P G's. Ensure that
	// each P has a background GC G.
	for _, p := range &allp {
		if p == nil || p.status == _Pdead {
			break
		}
		if p.gcBgMarkWorker == nil {
			go gcBgMarkWorker(p)
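			// Wait for the new worker to register itself in
			// p.gcBgMarkWorker and signal bgMarkReady before
			// starting the next one.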
			notetsleepg(&work.bgMarkReady, -1)
			noteclear(&work.bgMarkReady)
		}
	}
}

// gcBgMarkPrepare sets up state for background marking.
// Mutator assists must not yet be enabled.
func gcBgMarkPrepare() {
	// Background marking will stop when the work queues are empty
	// and there are no more workers (note that, since this is
	// concurrent, this may be a transient state, but mark
	// termination will clean it up). Between background workers
	// and assists, we don't really know how many workers there
	// will be, so we pretend to have an arbitrarily large number
	// of workers, almost all of which are "waiting". While a
	// worker is working it decrements nwait. If nproc == nwait,
	// there are no workers.
	work.nproc = ^uint32(0)
	work.nwait = ^uint32(0)

	// Background GC and assists race to set this to 1 on
	// completion so that this only gets one "done" signal.
	work.bgMarkDone = 0

	gcController.bgMarkStartTime = nanotime()
}

func gcBgMarkWorker(p *p) {
	// Register this G as the background mark worker for p.
	if p.gcBgMarkWorker != nil {
		throw("P already has a background mark worker")
	}
	gp := getg()

	mp := acquirem()
	p.gcBgMarkWorker = gp
	// After this point, the background mark worker is scheduled
	// cooperatively by gcController.findRunnable. Hence, it must
	// never be preempted, as this would put it into _Grunnable
	// and put it on a run queue. Instead, when the preempt flag
	// is set, this puts itself into _Gwaiting to be woken up by
	// gcController.findRunnable at the appropriate time.
	notewakeup(&work.bgMarkReady)
	for {
		// Go to sleep until woken by gcController.findRunnable.
		// We can't releasem yet since even the call to gopark
		// may be preempted.
		gopark(func(g *g, mp unsafe.Pointer) bool {
			releasem((*m)(mp))
			return true
		}, unsafe.Pointer(mp), "mark worker (idle)", traceEvGoBlock, 0)

		// Loop until the P dies and disassociates this
		// worker. (The P may later be reused, in which case
		// it will get a new worker.)
		if p.gcBgMarkWorker != gp {
			break
		}

		// Disable preemption so we can use the gcw. If the
		// scheduler wants to preempt us, we'll stop draining,
		// dispose the gcw, and then preempt.
		mp = acquirem()

		if gcBlackenEnabled == 0 {
			throw("gcBgMarkWorker: blackening not enabled")
		}

		startTime := nanotime()

		xadd(&work.nwait, -1)

		done := false
		switch p.gcMarkWorkerMode {
		default:
			throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
		case gcMarkWorkerDedicatedMode:
			gcDrain(&p.gcw, gcBgCreditSlack)
			// gcDrain did the xadd(&work.nwait +1) to
			// match the decrement above. It only returns
			// at a mark completion point.
			done = true
		case gcMarkWorkerFractionalMode, gcMarkWorkerIdleMode:
			gcDrainUntilPreempt(&p.gcw, gcBgCreditSlack)
			// Was this the last worker and did we run out
			// of work?
			done = xadd(&work.nwait, +1) == work.nproc && work.full == 0 && work.partial == 0
		}
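		// (With the sentinel counts set in gcBgMarkPrepare, nwait == nproc
		// means no worker is currently draining; together with empty full
		// and partial lists this is treated as a completion point.)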
		// We're not in mark termination, so there's no need
		// to dispose p.gcw.

		// If this worker reached a background mark completion
		// point, signal the main GC goroutine.
		if done {
			gcBgMarkDone()
		}

		duration := nanotime() - startTime
		switch p.gcMarkWorkerMode {
		case gcMarkWorkerDedicatedMode:
			xaddint64(&gcController.dedicatedMarkTime, duration)
		case gcMarkWorkerFractionalMode:
			xaddint64(&gcController.fractionalMarkTime, duration)
			xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
		case gcMarkWorkerIdleMode:
			xaddint64(&gcController.idleMarkTime, duration)
		}
	}
}

// gcBgMarkDone signals the completion of background marking. This can
// be called multiple times during a cycle; only the first call has
// any effect.
func gcBgMarkDone() {
	if cas(&work.bgMarkDone, 0, 1) {
		// This is the first worker to reach completion.
		// Signal the main GC goroutine.
		lock(&work.bgMarkWake.lock)
		if work.bgMarkWake.g == nil {
			// It hasn't parked yet.
			work.bgMarkWake.wake = true
		} else {
			ready(work.bgMarkWake.g, 0)
		}
		unlock(&work.bgMarkWake.lock)
	}
}

// gcFlushGCWork disposes the gcWork caches of all Ps. The world must
// be stopped.
//go:nowritebarrier
func gcFlushGCWork() {
	// Gather all cached GC work. All other Ps are stopped, so
	// it's safe to manipulate their GC work caches.
	for i := 0; i < int(gomaxprocs); i++ {
		allp[i].gcw.dispose()
	}
}

// gcMark runs the mark (or, for concurrent GC, mark termination).
// STW is in effect at this point.
//TODO go:nowritebarrier
func gcMark(start_time int64) {
	if debug.allocfreetrace > 0 {
		tracegc()
	}

	if gcphase != _GCmarktermination {
		throw("in gcMark expecting to see gcphase as _GCmarktermination")
	}
|
[dev.cc] runtime: delete scalararg, ptrarg; rename onM to systemstack
Scalararg and ptrarg are not "signal safe".
Go code filling them out can be interrupted by a signal,
and then the signal handler runs, and if it also ends up
in Go code that uses scalararg or ptrarg, now the old
values have been smashed.
For the pieces of code that do need to run in a signal handler,
we introduced onM_signalok, which is really just onM
except that the _signalok is meant to convey that the caller
asserts that scalararg and ptrarg will be restored to their old
values after the call (instead of the usual behavior, zeroing them).
Scalararg and ptrarg are also untyped and therefore error-prone.
Go code can always pass a closure instead of using scalararg
and ptrarg; they were only really necessary for C code.
And there's no more C code.
For all these reasons, delete scalararg and ptrarg, converting
the few remaining references to use closures.
Once those are gone, there is no need for a distinction between
onM and onM_signalok, so replace both with a single function
equivalent to the current onM_signalok (that is, it can be called
on any of the curg, g0, and gsignal stacks).
The name onM and the phrase 'm stack' are misnomers,
because on most systems an M has two system stacks:
the main thread stack and the signal handling stack.
Correct the misnomer by naming the replacement function systemstack.
Fix a few references to "M stack" in code.
The main motivation for this change is to eliminate scalararg/ptrarg.
Rick and I have already seen them cause problems because
the calling sequence m.ptrarg[0] = p is a heap pointer assignment,
so it gets a write barrier. The write barrier also uses onM, so it has
all the same problems as if it were being invoked by a signal handler.
We worked around this by saving and restoring the old values
and by calling onM_signalok, but there's no point in keeping this nice
home for bugs around any longer.
This CL also changes funcline to return the file name as a result
instead of filling in a passed-in *string. (The *string signature is
left over from when the code was written in and called from C.)
That's arguably an unrelated change, except that once I had done
the ptrarg/scalararg/onM cleanup I started getting false positives
about the *string argument escaping (not allowed in package runtime).
The compiler is wrong, but the easiest fix is to write the code like
Go code instead of like C code. I am a bit worried that the compiler
is wrong because of some use of uninitialized memory in the escape
analysis. If that's the reason, it will go away when we convert the
compiler to Go. (And if not, we'll debug it the next time.)
LGTM=khr
R=r, khr
CC=austin, golang-codereviews, iant, rlh
https://golang.org/cl/174950043
2014-11-12 12:54:31 -07:00
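As a rough illustration of the closure-instead-of-scalararg pattern the message describes, the sketch below captures arguments directly. systemstack is the runtime entry point named by this change; setupSpanSketch and initSpan are invented names used only for the example.

// Before this change, base and npages would have been smuggled through
// per-M scalararg/ptrarg slots; now the closure simply captures them.
func setupSpanSketch(base uintptr, npages uintptr) {
	systemstack(func() {
		// Runs on the system stack; with no per-M argument slots, a
		// signal handler interrupting us cannot smash the arguments.
		initSpan(base, npages) // hypothetical helper
	})
}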
|
|
|
t0 := start_time
|
|
|
|
work.tstart = start_time
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-03-05 15:33:08 -07:00
|
|
|
gcCopySpans() // TODO(rlh): should this be hoisted and done only once? Right now it is done for normal marking and also for checkmarking.
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-05-01 16:08:44 -06:00
|
|
|
// Make sure the per-P gcWork caches are empty. During mark
|
runtime: replace per-M workbuf cache with per-P gcWork cache
Currently, each M has a cache of the most recently used *workbuf. This
is used primarily by the write barrier so it doesn't have to access
the global workbuf lists on every write barrier. It's also used by
stack scanning because it's convenient.
This cache is important for write barrier performance, but this
particular approach has several downsides. It's faster than no cache,
but far from optimal (as the benchmarks below show). It's complex:
access to the cache is sprinkled through most of the workbuf list
operations and it requires special care to transform into and back out
of the gcWork cache that's actually used for scanning and marking. It
requires atomic exchanges to take ownership of the cached workbuf and
to return it to the M's cache even though it's almost always used by
only the current M. Since it's per-M, flushing these caches is O(# of
Ms), which may be high. And it has some significant subtleties: for
example, in general the cache shouldn't be used after the
harvestwbufs() in mark termination because it could hide work from
mark termination, but stack scanning can happen after this and *will*
use the cache (but it turns out this is okay because it will always be
followed by a getfull(), which drains the cache).
This change replaces this cache with a per-P gcWork object. This
gcWork cache can be used directly by scanning and marking (as long as
preemption is disabled, which is a general requirement of gcWork).
Since it's per-P, it doesn't require synchronization, which simplifies
things and means the only atomic operations in the write barrier are
occasionally fetching new work buffers and setting a mark bit if the
object isn't already marked. This cache can be flushed in O(# of Ps),
which is generally small. It follows a simple flushing rule: the cache
can be used during any phase, but during mark termination it must be
flushed before allowing preemption. This also makes the dispose during
mutator assist no longer necessary, which eliminates the vast majority
of gcWork dispose calls and reduces contention on the global workbuf
lists. And it's a lot faster on some benchmarks:
benchmark old ns/op new ns/op delta
BenchmarkBinaryTree17 11963668673 11206112763 -6.33%
BenchmarkFannkuch11 2643217136 2649182499 +0.23%
BenchmarkFmtFprintfEmpty 70.4 70.2 -0.28%
BenchmarkFmtFprintfString 364 307 -15.66%
BenchmarkFmtFprintfInt 317 282 -11.04%
BenchmarkFmtFprintfIntInt 512 483 -5.66%
BenchmarkFmtFprintfPrefixedInt 404 380 -5.94%
BenchmarkFmtFprintfFloat 521 479 -8.06%
BenchmarkFmtManyArgs 2164 1894 -12.48%
BenchmarkGobDecode 30366146 22429593 -26.14%
BenchmarkGobEncode 29867472 26663152 -10.73%
BenchmarkGzip 391236616 396779490 +1.42%
BenchmarkGunzip 96639491 96297024 -0.35%
BenchmarkHTTPClientServer 100110 70763 -29.31%
BenchmarkJSONEncode 51866051 52511382 +1.24%
BenchmarkJSONDecode 103813138 86094963 -17.07%
BenchmarkMandelbrot200 4121834 4120886 -0.02%
BenchmarkGoParse 16472789 5879949 -64.31%
BenchmarkRegexpMatchEasy0_32 140 140 +0.00%
BenchmarkRegexpMatchEasy0_1K 394 394 +0.00%
BenchmarkRegexpMatchEasy1_32 120 120 +0.00%
BenchmarkRegexpMatchEasy1_1K 621 614 -1.13%
BenchmarkRegexpMatchMedium_32 209 202 -3.35%
BenchmarkRegexpMatchMedium_1K 54889 55175 +0.52%
BenchmarkRegexpMatchHard_32 2682 2675 -0.26%
BenchmarkRegexpMatchHard_1K 79383 79524 +0.18%
BenchmarkRevcomp 584116718 584595320 +0.08%
BenchmarkTemplate 125400565 109620196 -12.58%
BenchmarkTimeParse 386 387 +0.26%
BenchmarkTimeFormat 580 447 -22.93%
(Best out of 10 runs. The delta of averages is similar.)
This also puts us in a good position to flush these caches when
nearing the end of concurrent marking, which will let us increase the
size of the work buffers while still controlling mark termination
pause time.
Change-Id: I2dd94c8517a19297a98ec280203cccaa58792522
Reviewed-on: https://go-review.googlesource.com/9178
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
2015-04-19 13:22:20 -06:00
|
|
|
// termination, these caches can still be used temporarily,
|
|
|
|
// but must be disposed to the global lists immediately.
|
2015-05-01 16:08:44 -06:00
|
|
|
gcFlushGCWork()
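The flushing rule described in the commit message above can be pictured with a toy, user-level cache. This is only a model of the rule (lock-free local puts, explicit dispose to a shared list before preemption is allowed during mark termination), not the runtime's gcWork type, and all names are invented.

package main

import "sync"

// workCache is a toy stand-in for a per-P work cache: it is owned by a
// single worker, so put needs no synchronization.
type workCache struct {
	buf []uintptr
}

// globalWork is a toy stand-in for the shared work lists.
var globalWork struct {
	mu   sync.Mutex
	full [][]uintptr
}

func (w *workCache) put(obj uintptr) {
	w.buf = append(w.buf, obj) // local and lock-free in the common case
}

// dispose hands any cached work to the shared lists; per the rule above,
// it must run before the owner can be preempted during mark termination.
func (w *workCache) dispose() {
	if len(w.buf) == 0 {
		return
	}
	globalWork.mu.Lock()
	globalWork.full = append(globalWork.full, w.buf)
	globalWork.mu.Unlock()
	w.buf = nil
}

func main() {
	var w workCache
	w.put(0x1000)
	w.put(0x2000)
	w.dispose() // nothing may remain cached once termination proceeds
}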
|
2015-04-19 13:22:20 -06:00
|
|
|
|
2014-11-11 15:05:02 -07:00
|
|
|
work.nwait = 0
|
|
|
|
work.ndone = 0
|
|
|
|
work.nproc = uint32(gcprocs())
|
2014-11-15 06:00:38 -07:00
|
|
|
|
2014-12-12 10:41:57 -07:00
|
|
|
if trace.enabled {
|
|
|
|
traceGCScanStart()
|
|
|
|
}
|
|
|
|
|
2015-01-28 13:49:26 -07:00
|
|
|
parforsetup(work.markfor, work.nproc, uint32(_RootCount+allglen), false, markroot)
|
2014-11-11 15:05:02 -07:00
|
|
|
if work.nproc > 1 {
|
|
|
|
noteclear(&work.alldone)
|
|
|
|
helpgc(int32(work.nproc))
|
|
|
|
}
|
|
|
|
|
|
|
|
gchelperstart()
|
|
|
|
parfordo(work.markfor)
|
2015-04-19 13:22:20 -06:00
|
|
|
|
runtime: switch to gcWork abstraction
This converts the garbage collector from directly manipulating work
buffers to using the new gcWork abstraction.
The previous management of work buffers was rather ad hoc. As a
result, switching to the gcWork abstraction changes many details of
work buffer management.
If greyobject fills a work buffer, it can now pull from work.partial
in addition to work.empty.
Previously, gcDrain started with a partial or empty work buffer and
fetched an empty work buffer if it filled its current buffer (in
greyobject). Now, gcDrain starts with a full work buffer and fetches
a partial or empty work buffer if it fills its current buffer (in
greyobject). The original behavior was bad because gcDrain would
immediately drop the empty work buffer returned by greyobject and
fetch a full work buffer, which greyobject was likely to immediately
overflow, fetching another empty work buffer, etc. The new behavior
isn't great at the start because greyobject is likely to immediately
overflow the full buffer, but the steady-state behavior should be more
stable. Both before and after this change, gcDrain fetches a full
work buffer if it drains its current buffer. Basically all of these
choices are bad; the right answer is to use a dual work buffer scheme.
Previously, shade always fetched a work buffer (though usually from
m.currentwbuf), even if the object was already marked. Now it only
fetches a work buffer if it actually greys an object.
Change-Id: I8b880ed660eb63135236fa5d5678f0c1c041881f
Reviewed-on: https://go-review.googlesource.com/5232
Reviewed-by: Russ Cox <rsc@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
2015-02-17 08:53:31 -07:00
|
|
|
var gcw gcWork
|
2015-03-13 11:29:23 -06:00
|
|
|
gcDrain(&gcw, -1)
|
2015-02-17 08:53:31 -07:00
|
|
|
gcw.dispose()
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2014-11-15 06:00:38 -07:00
|
|
|
if work.full != 0 {
|
2014-12-27 21:58:00 -07:00
|
|
|
throw("work.full != 0")
|
2014-11-15 06:00:38 -07:00
|
|
|
}
|
|
|
|
if work.partial != 0 {
|
2014-12-27 21:58:00 -07:00
|
|
|
throw("work.partial != 0")
|
2014-11-15 06:00:38 -07:00
|
|
|
}
|
|
|
|
|
2014-11-11 15:05:02 -07:00
|
|
|
if work.nproc > 1 {
|
|
|
|
notesleep(&work.alldone)
|
|
|
|
}
|
|
|
|
|
2015-04-19 13:22:20 -06:00
|
|
|
for i := 0; i < int(gomaxprocs); i++ {
|
|
|
|
if allp[i].gcw.wbuf != 0 {
|
|
|
|
throw("P has cached GC work at end of mark termination")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-12 10:41:57 -07:00
|
|
|
if trace.enabled {
|
|
|
|
traceGCScanDone()
|
|
|
|
}
|
|
|
|
|
2014-11-11 15:05:02 -07:00
|
|
|
shrinkfinish()
|
|
|
|
|
|
|
|
cachestats()
|
runtime: introduce heap_live; replace use of heap_alloc in GC
Currently there are two main consumers of memstats.heap_alloc:
updatememstats (aka ReadMemStats) and shouldtriggergc.
updatememstats recomputes heap_alloc from the ground up, so we don't
need to keep heap_alloc up to date for it. shouldtriggergc wants to
know how many bytes were marked by the previous GC plus how many bytes
have been allocated since then, but this *isn't* what heap_alloc
tracks. heap_alloc also includes objects that are not marked and
haven't yet been swept.
Introduce a new memstat called heap_live that actually tracks what
shouldtriggergc wants to know and stop keeping heap_alloc up to date.
Unlike heap_alloc, heap_live follows a simple sawtooth that drops
during each mark termination and increases monotonically between GCs.
heap_alloc, on the other hand, has much more complicated behavior: it
may drop during sweep termination, slowly decreases from background
sweeping between GCs, is roughly unaffected by allocation as long as
there are unswept spans (because we sweep and allocate at the same
rate), and may go up after background sweeping is done depending on
the GC trigger.
heap_live simplifies computing next_gc and using it to figure out when
to trigger garbage collection. Currently, we guess next_gc at the end
of a cycle and update it as we sweep and get a better idea of how much
heap was marked. Now, since we're directly tracking how much heap is
marked, we can directly compute next_gc.
This also corrects bugs that could cause us to trigger GC early.
Currently, in any case where sweep termination actually finds spans to
sweep, heap_alloc is an overestimation of live heap, so we'll trigger
GC too early. heap_live, on the other hand, is unaffected by sweeping.
Change-Id: I1f96807b6ed60d4156e8173a8e68745ffc742388
Reviewed-on: https://go-review.googlesource.com/8389
Reviewed-by: Russ Cox <rsc@golang.org>
2015-03-30 16:01:32 -06:00
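A tiny model of the sawtooth described above, with invented names: the statistic grows with each allocation and snaps back to the marked heap size at mark termination.

// liveStat models the heap_live/heap_marked pair described above.
type liveStat struct {
	heapLive   uint64 // grows monotonically between GCs
	heapMarked uint64 // set once per cycle at mark termination
}

func (s *liveStat) noteAlloc(bytes uint64) { s.heapLive += bytes }

func (s *liveStat) noteMarkTermination(bytesMarked uint64) {
	s.heapMarked = bytesMarked
	s.heapLive = bytesMarked // the sawtooth drops to the marked size
}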
|
|
|
|
runtime: use reachable heap estimate to set trigger/goal
Currently, we set the heap goal for the next GC cycle using the size
of the marked heap at the end of the current cycle. This can lead to a
bad feedback loop if the mutator is rapidly allocating and releasing
pointers that can significantly bloat heap size.
If the GC were STW, the marked heap size would be exactly the
reachable heap size (call it stwLive). However, in concurrent GC,
marked=stwLive+floatLive, where floatLive is the amount of "floating
garbage": objects that were reachable at some point during the cycle
and were marked, but which are no longer reachable by the end of the
cycle. If the GC cycle is short, then the mutator doesn't have much
time to create floating garbage, so marked≈stwLive. However, if the GC
cycle is long and the mutator is allocating and creating floating
garbage very rapidly, then it's possible that marked≫stwLive. Since
the runtime currently sets the heap goal based on marked, this will
cause it to set a high heap goal. This means that 1) the next GC cycle
will take longer because of the larger heap and 2) the assist ratio
will be low because of the large distance between the trigger and the
goal. The combination of these lets the mutator produce even more
floating garbage in the next cycle, which further exacerbates the
problem.
For example, on the garbage benchmark with GOMAXPROCS=1, this causes
the heap to grow to ~500MB and the garbage collector to retain upwards
of ~300MB of heap, while the true reachable heap size is ~32MB. This,
in turn, causes the GC cycle to take upwards of ~3 seconds.
Fix this bad feedback loop by estimating the true reachable heap size
(stwLive) and using this rather than the marked heap size
(stwLive+floatLive) as the basis for the GC trigger and heap goal.
This breaks the bad feedback loop and causes the mutator to assist
more, which decreases the rate at which it can create floating
garbage. On the same garbage benchmark, this reduces the maximum heap
size to ~73MB, the retained heap to ~40MB, and the duration of the GC
cycle to ~200ms.
Change-Id: I7712244c94240743b266f9eb720c03802799cdd1
Reviewed-on: https://go-review.googlesource.com/9177
Reviewed-by: Rick Hudson <rlh@golang.org>
2015-04-21 12:24:25 -06:00
|
|
|
// Compute the reachable heap size at the beginning of the
|
|
|
|
// cycle. This is approximately the marked heap size at the
|
|
|
|
// end (which we know) minus the amount of marked heap that
|
|
|
|
// was allocated after marking began (which we don't know, but
|
|
|
|
// is approximately the amount of heap that was allocated
|
|
|
|
// since marking began).
|
runtime: fix underflow in next_gc calculation
Currently, it's possible for the next_gc calculation to underflow.
Since next_gc is unsigned, this wraps around and effectively disables
GC for the rest of the program's execution. Besides being obviously
wrong, this is causing test failures on 32-bit because some tests are
running out of heap.
This underflow happens for two reasons, both having to do with how we
estimate the reachable heap size at the end of the GC cycle.
One reason is that this calculation depends on the value of heap_live
at the beginning of the GC cycle, but we currently only record that
value during a concurrent GC and not during a forced STW GC. Fix this
by moving the recorded value from gcController to work and recording
it on a common code path.
The other reason is that we use the amount of allocation during the GC
cycle as an approximation of the amount of floating garbage and
subtract it from the marked heap to estimate the reachable heap.
However, since this is only an approximation, it's possible for the
amount of allocation during the cycle to be *larger* than the marked
heap size (since the runtime allocates white and it's possible for
these allocations to never be made reachable from the heap). Currently
this causes wrap-around in our estimate of the reachable heap size,
which in turn causes wrap-around in next_gc. Fix this by bottoming out
the reachable heap estimate at 0, in which case we just fall back to
triggering GC at heapminimum (which is okay since this only happens on
small heaps).
Fixes #10555, fixes #10556, and fixes #10559.
Change-Id: Iad07b529c03772356fede2ae557732f13ebfdb63
Reviewed-on: https://go-review.googlesource.com/9286
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
2015-04-23 11:02:31 -06:00
|
|
|
allocatedDuringCycle := memstats.heap_live - work.initialHeapLive
|
|
|
|
if work.bytesMarked >= allocatedDuringCycle {
|
|
|
|
memstats.heap_reachable = work.bytesMarked - allocatedDuringCycle
|
|
|
|
} else {
|
|
|
|
// This can happen if most of the allocation during
|
|
|
|
// the cycle never became reachable from the heap.
|
|
|
|
// Just set the reachable heap approximation to 0 and
|
|
|
|
// let the heapminimum kick in below.
|
|
|
|
memstats.heap_reachable = 0
|
|
|
|
}
|
2015-04-21 12:24:25 -06:00
|
|
|
|
|
|
|
// Trigger the next GC cycle when the allocated heap has grown
|
|
|
|
// by triggerRatio over the reachable heap size. Assume that
|
|
|
|
// we're in steady state, so the reachable heap size is the
|
|
|
|
// same now as it was at the beginning of the GC cycle.
|
|
|
|
memstats.next_gc = uint64(float64(memstats.heap_reachable) * (1 + gcController.triggerRatio))
|
2015-01-28 13:57:46 -07:00
|
|
|
if memstats.next_gc < heapminimum {
|
|
|
|
memstats.next_gc = heapminimum
|
|
|
|
}
|
2015-04-23 11:02:31 -06:00
|
|
|
if int64(memstats.next_gc) < 0 {
|
|
|
|
print("next_gc=", memstats.next_gc, " bytesMarked=", work.bytesMarked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "\n")
|
|
|
|
throw("next_gc underflow")
|
|
|
|
}
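Putting the pieces above together, here is a standalone sketch of the whole trigger computation. The inputs are passed in explicitly; heapMinimum and triggerRatio are stand-ins for the runtime's tuned values, and the function name is invented.

// nextGCSketch mirrors the reachable-heap estimate and trigger logic above.
func nextGCSketch(bytesMarked, heapLive, initialHeapLive, heapMinimum uint64, triggerRatio float64) uint64 {
	allocatedDuringCycle := heapLive - initialHeapLive
	var reachable uint64
	if bytesMarked >= allocatedDuringCycle {
		// marked ≈ reachable at start of cycle + allocation during the cycle.
		reachable = bytesMarked - allocatedDuringCycle
	} else {
		// Most allocation during the cycle never became reachable;
		// bottom out at 0 so the minimum below takes over.
		reachable = 0
	}
	nextGC := uint64(float64(reachable) * (1 + triggerRatio))
	if nextGC < heapMinimum {
		nextGC = heapMinimum
	}
	return nextGC
}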
|
|
|
|
|
|
|
|
// Update other GC heap size stats.
|
|
|
|
memstats.heap_live = work.bytesMarked
|
|
|
|
memstats.heap_marked = work.bytesMarked
|
2015-05-04 14:10:49 -06:00
|
|
|
memstats.heap_scan = uint64(gcController.scanWork)
|
2015-01-28 13:57:46 -07:00
|
|
|
|
2014-12-12 10:41:57 -07:00
|
|
|
if trace.enabled {
|
2015-03-30 16:01:32 -06:00
|
|
|
traceHeapAlloc()
|
2014-12-12 10:41:57 -07:00
|
|
|
traceNextGC()
|
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
|
|
|
|
t4 := nanotime()
|
|
|
|
atomicstore64(&memstats.last_gc, uint64(unixnanotime())) // must be Unix time to make sense to user
|
|
|
|
memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(t4 - t0)
|
|
|
|
memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(t4)
|
|
|
|
memstats.pause_total_ns += uint64(t4 - t0)
|
2015-02-19 14:43:27 -07:00
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-02-19 14:43:27 -07:00
|
|
|
func gcSweep(mode int) {
|
2015-03-05 15:33:08 -07:00
|
|
|
if gcphase != _GCoff {
|
|
|
|
throw("gcSweep being done but phase is not GCoff")
|
|
|
|
}
|
2015-02-19 14:43:27 -07:00
|
|
|
gcCopySpans()
|
2014-11-15 06:00:38 -07:00
|
|
|
|
2015-02-19 14:21:42 -07:00
|
|
|
lock(&mheap_.lock)
|
2014-11-11 15:05:02 -07:00
|
|
|
mheap_.sweepgen += 2
|
|
|
|
mheap_.sweepdone = 0
|
|
|
|
sweep.spanidx = 0
|
|
|
|
unlock(&mheap_.lock)
|
|
|
|
|
2015-02-19 14:43:27 -07:00
|
|
|
if !_ConcurrentSweep || mode == gcForceBlockMode {
|
|
|
|
// Special case synchronous sweep.
|
runtime: finish sweeping before concurrent GC starts
Currently, the concurrent sweep follows a 1:1 rule: when allocation
needs a span, it sweeps a span (likewise, when a large allocation
needs N pages, it sweeps until it frees N pages). This rule worked
well for the STW collector (especially when GOGC==100) because it did
no more sweeping than necessary to keep the heap from growing, would
generally finish sweeping just before GC, and ensured good temporal
locality between sweeping a page and allocating from it.
It doesn't work well with concurrent GC. Since concurrent GC requires
starting GC earlier (sometimes much earlier), the sweep often won't be
done when GC starts. Unfortunately, the first thing GC has to do is
finish the sweep. In the mean time, the mutator can continue
allocating, pushing the heap size even closer to the goal size. This
worked okay with the 7/8ths trigger, but it gets into a vicious cycle
with the GC trigger controller: if the mutator is allocating quickly
and driving the trigger lower, more and more sweep work will be left
to GC; this both causes GC to take longer (allowing the mutator to
allocate more during GC) and delays the start of the concurrent mark
phase, which throws off the GC controller's statistics and generally
causes it to push the trigger even lower.
As an example of a particularly bad case, the garbage benchmark with
GOMAXPROCS=4 and -benchmem 512 (MB) spends the first 0.4-0.8 seconds
of each GC cycle sweeping, during which the heap grows by between
109MB and 252MB.
To fix this, this change replaces the 1:1 sweep rule with a
proportional sweep rule. At the end of GC, GC knows exactly how much
heap allocation will occur before the next concurrent GC as well as
how many span pages must be swept. This change computes this "sweep
ratio" and when the mallocgc asks for a span, the mcentral sweeps
enough spans to bring the swept span count into ratio with the
allocated byte count.
On the benchmark from above, this entirely eliminates sweeping at the
beginning of GC, which reduces the time between startGC readying the
GC goroutine and GC stopping the world for sweep termination to ~100µs
during which the heap grows at most 134KB.
Change-Id: I35422d6bba0c2310d48bb1f8f30a72d29e98c1af
Reviewed-on: https://go-review.googlesource.com/8921
Reviewed-by: Rick Hudson <rlh@golang.org>
2015-04-13 21:34:57 -06:00
|
|
|
// Record that no proportional sweeping has to happen.
|
|
|
|
lock(&mheap_.lock)
|
|
|
|
mheap_.sweepPagesPerByte = 0
|
|
|
|
mheap_.pagesSwept = 0
|
|
|
|
unlock(&mheap_.lock)
|
2014-11-11 15:05:02 -07:00
|
|
|
// Sweep all spans eagerly.
|
|
|
|
for sweepone() != ^uintptr(0) {
|
|
|
|
sweep.npausesweep++
|
|
|
|
}
|
|
|
|
// Do an additional mProf_GC, because all 'free' events are now real as well.
|
|
|
|
mProf_GC()
|
2015-02-19 14:43:27 -07:00
|
|
|
mProf_GC()
|
|
|
|
return
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
2015-04-13 21:34:57 -06:00
|
|
|
// Account how much sweeping needs to be done before the next
|
|
|
|
// GC cycle and set up proportional sweep statistics.
|
|
|
|
var pagesToSweep uintptr
|
|
|
|
for _, s := range work.spans {
|
|
|
|
if s.state == mSpanInUse {
|
|
|
|
pagesToSweep += s.npages
|
|
|
|
}
|
|
|
|
}
|
|
|
|
heapDistance := int64(memstats.next_gc) - int64(memstats.heap_live)
|
|
|
|
// Add a little margin so rounding errors and concurrent
|
|
|
|
// sweep are less likely to leave pages unswept when GC starts.
|
|
|
|
heapDistance -= 1024 * 1024
|
|
|
|
if heapDistance < _PageSize {
|
|
|
|
// Avoid setting the sweep ratio extremely high
|
|
|
|
heapDistance = _PageSize
|
|
|
|
}
|
|
|
|
lock(&mheap_.lock)
|
|
|
|
mheap_.sweepPagesPerByte = float64(pagesToSweep) / float64(heapDistance)
|
|
|
|
mheap_.pagesSwept = 0
|
|
|
|
unlock(&mheap_.lock)
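For reference, the proportional sweep setup above boils down to a single ratio. The sketch below computes it from explicit inputs; the function name is invented and the page size is passed in rather than taken from the runtime's constant.

// sweepPagesPerByteSketch returns how many span pages each allocated byte
// obligates the allocator to sweep so that sweeping finishes by the time
// heap_live reaches next_gc.
func sweepPagesPerByteSketch(pagesToSweep uintptr, nextGC, heapLive uint64, pageSize int64) float64 {
	heapDistance := int64(nextGC) - int64(heapLive)
	heapDistance -= 1024 * 1024 // margin against rounding and concurrent sweep
	if heapDistance < pageSize {
		heapDistance = pageSize // avoid an extreme ratio when headroom is tiny
	}
	return float64(pagesToSweep) / float64(heapDistance)
}

A caller that has just allocated n bytes would then sweep roughly ratio*n pages before proceeding, which is how the 1:1 rule becomes proportional.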
|
|
|
|
|
2015-02-19 14:43:27 -07:00
|
|
|
// Background sweep.
|
|
|
|
lock(&sweep.lock)
|
2015-03-05 14:04:17 -07:00
|
|
|
if sweep.parked {
|
2015-02-19 14:43:27 -07:00
|
|
|
sweep.parked = false
|
2015-02-21 11:01:40 -07:00
|
|
|
ready(sweep.g, 0)
|
2015-02-19 14:43:27 -07:00
|
|
|
}
|
|
|
|
unlock(&sweep.lock)
|
2014-11-11 15:05:02 -07:00
|
|
|
mProf_GC()
|
2015-02-19 14:43:27 -07:00
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-02-19 14:43:27 -07:00
|
|
|
func gcCopySpans() {
|
|
|
|
// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
|
|
|
|
// resizing/freeing allspans.
|
|
|
|
// New spans can be created while GC progresses, but they are not garbage for
|
|
|
|
// this round:
|
|
|
|
// - new stack spans can be created even while the world is stopped.
|
|
|
|
// - new malloc spans can be created during the concurrent sweep
|
|
|
|
// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
|
|
|
|
lock(&mheap_.lock)
|
|
|
|
// Free the old cached mark array if necessary.
|
|
|
|
if work.spans != nil && &work.spans[0] != &h_allspans[0] {
|
|
|
|
sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
2015-02-19 14:43:27 -07:00
|
|
|
// Cache the current array for sweeping.
|
|
|
|
mheap_.gcspans = mheap_.allspans
|
|
|
|
work.spans = h_allspans
|
|
|
|
unlock(&mheap_.lock)
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
2015-02-24 20:20:38 -07:00
|
|
|
// gcResetGState resets the GC state of all G's and returns the length
|
|
|
|
// of allgs.
|
2015-02-24 20:29:33 -07:00
|
|
|
func gcResetGState() (numgs int) {
|
2015-02-24 20:20:38 -07:00
|
|
|
// This may be called during a concurrent phase, so make sure
|
|
|
|
// allgs doesn't change.
|
|
|
|
lock(&allglock)
|
2015-02-24 20:29:33 -07:00
|
|
|
for _, gp := range allgs {
|
2015-02-24 20:20:38 -07:00
|
|
|
gp.gcworkdone = false // set to true in gcphasework
|
|
|
|
gp.gcscanvalid = false // stack has not been scanned
|
2015-03-16 12:22:00 -06:00
|
|
|
gp.gcalloc = 0
|
|
|
|
gp.gcscanwork = 0
|
2015-02-24 20:20:38 -07:00
|
|
|
}
|
2015-02-24 20:29:33 -07:00
|
|
|
numgs = len(allgs)
|
2015-02-24 20:20:38 -07:00
|
|
|
unlock(&allglock)
|
2015-02-24 20:29:33 -07:00
|
|
|
return
|
2015-02-24 20:20:38 -07:00
|
|
|
}
|
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
// Hooks for other packages
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
var poolcleanup func()
|
|
|
|
|
|
|
|
//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
|
|
|
|
func sync_runtime_registerPoolCleanup(f func()) {
|
|
|
|
poolcleanup = f
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
func clearpools() {
|
|
|
|
// clear sync.Pools
|
|
|
|
if poolcleanup != nil {
|
|
|
|
poolcleanup()
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
2015-02-02 14:33:02 -07:00
|
|
|
// Clear central sudog cache.
|
|
|
|
// Leave per-P caches alone, they have strictly bounded size.
|
|
|
|
// Disconnect cached list before dropping it on the floor,
|
|
|
|
// so that a dangling ref to one entry does not pin all of them.
|
|
|
|
lock(&sched.sudoglock)
|
|
|
|
var sg, sgnext *sudog
|
|
|
|
for sg = sched.sudogcache; sg != nil; sg = sgnext {
|
|
|
|
sgnext = sg.next
|
|
|
|
sg.next = nil
|
|
|
|
}
|
|
|
|
sched.sudogcache = nil
|
|
|
|
unlock(&sched.sudoglock)
|
|
|
|
|
2015-02-05 06:35:41 -07:00
|
|
|
// Clear central defer pools.
|
|
|
|
// Leave per-P pools alone, they have strictly bounded size.
|
|
|
|
lock(&sched.deferlock)
|
|
|
|
for i := range sched.deferpool {
|
|
|
|
// disconnect cached list before dropping it on the floor,
|
|
|
|
// so that a dangling ref to one entry does not pin all of them.
|
|
|
|
var d, dlink *_defer
|
|
|
|
for d = sched.deferpool[i]; d != nil; d = dlink {
|
|
|
|
dlink = d.link
|
|
|
|
d.link = nil
|
|
|
|
}
|
|
|
|
sched.deferpool[i] = nil
|
|
|
|
}
|
|
|
|
unlock(&sched.deferlock)
|
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
for _, p := range &allp {
|
|
|
|
if p == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
// clear tinyalloc pool
|
|
|
|
if c := p.mcache; c != nil {
|
|
|
|
c.tiny = nil
|
|
|
|
c.tinyoffset = 0
|
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
2015-02-19 11:38:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Timing
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
//go:nowritebarrier
|
|
|
|
func gchelper() {
|
|
|
|
_g_ := getg()
|
|
|
|
_g_.m.traceback = 2
|
|
|
|
gchelperstart()
|
|
|
|
|
|
|
|
if trace.enabled {
|
|
|
|
traceGCScanStart()
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
// parallel mark over GC roots
|
|
|
|
parfordo(work.markfor)
|
|
|
|
if gcphase != _GCscan {
|
|
|
|
var gcw gcWork
|
2015-03-13 11:29:23 -06:00
|
|
|
gcDrain(&gcw, -1) // blocks in getfull
|
2015-02-19 11:38:46 -07:00
|
|
|
gcw.dispose()
|
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
|
2015-02-19 11:38:46 -07:00
|
|
|
if trace.enabled {
|
|
|
|
traceGCScanDone()
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
2015-02-19 11:38:46 -07:00
|
|
|
|
|
|
|
nproc := work.nproc // work.nproc can change right after we increment work.ndone
|
|
|
|
if xadd(&work.ndone, +1) == nproc-1 {
|
|
|
|
notewakeup(&work.alldone)
|
|
|
|
}
|
|
|
|
_g_.m.traceback = 0
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
func gchelperstart() {
|
|
|
|
_g_ := getg()
|
|
|
|
|
|
|
|
if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
|
2014-12-27 21:58:00 -07:00
|
|
|
throw("gchelperstart: bad m->helpgc")
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
if _g_ != _g_.m.g0 {
|
2014-12-27 21:58:00 -07:00
|
|
|
throw("gchelper not running on g0 stack")
|
2014-11-11 15:05:02 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-26 16:48:42 -06:00
|
|
|
// itoaDiv formats val/(10**dec) into buf.
|
|
|
|
func itoaDiv(buf []byte, val uint64, dec int) []byte {
|
|
|
|
i := len(buf) - 1
|
|
|
|
idec := i - dec
|
|
|
|
for val >= 10 || i >= idec {
|
|
|
|
buf[i] = byte(val%10 + '0')
|
|
|
|
i--
|
|
|
|
if i == idec {
|
|
|
|
buf[i] = '.'
|
|
|
|
i--
|
|
|
|
}
|
|
|
|
val /= 10
|
|
|
|
}
|
|
|
|
buf[i] = byte(val + '0')
|
|
|
|
return buf[i:]
|
|
|
|
}
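A hypothetical usage, for reference:

// With any sufficiently large buffer:
//   string(itoaDiv(make([]byte, 20), 12345, 3)) == "12.345"
// i.e. 12345/10**3 formatted with three digits after the decimal point.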
|