2014-07-30 10:01:52 -06:00
|
|
|
// Copyright 2014 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
2014-08-15 13:22:33 -06:00
|
|
|
import "unsafe"
|
2014-07-30 10:01:52 -06:00
|
|
|
|
|
|
|
const (
	debugMalloc = false

	// Allocation flags, aliases of the underscore-prefixed values
	// carried over from the C runtime.
	flagNoScan = _FlagNoScan // object contains no pointers; GC bitmap setup is skipped
	flagNoZero = _FlagNoZero // caller does not require zeroed memory

	// Size bounds for the tiny and small allocators.
	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	// Heap page geometry.
	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
)
|
|
|
|
|
cmd/cc, runtime: preserve C runtime type names in generated Go
uintptr or uint64 in the runtime C were turning into uint in the Go,
bool was turning into uint8, and so on. Fix that.
Also delete Go wrappers for C functions.
The C functions can be called directly now
(but still eventually need to be converted to Go).
LGTM=bradfitz, minux, iant
R=golang-codereviews, bradfitz, iant, minux
CC=golang-codereviews, khr, r
https://golang.org/cl/138740043
2014-08-27 19:59:49 -06:00
|
|
|
// pageID is a heap page number (address>>pageShift).
type pageID uintptr
|
|
|
|
|
2014-09-16 08:22:15 -06:00
|
|
|
// zerobase is the base address for all 0-byte allocations;
// mallocgc returns &zerobase for every size-0 request.
var zerobase uintptr
|
2014-07-30 10:01:52 -06:00
|
|
|
|
2015-01-13 13:36:42 -07:00
|
|
|
// Trigger the concurrent GC when 1/triggerratio memory is available to allocate.
// Adjust this ratio as part of a scheme to ensure that mutators have enough
// memory to allocate during a concurrent GC cycle.
var triggerratio = int64(8)
|
|
|
|
|
2015-01-06 12:58:49 -07:00
|
|
|
// shouldtriggergc determines whether to initiate a GC.
// If the GC is already working there is no need to trigger another one.
// This should establish a feedback loop where if the GC does not
// have sufficient time to complete then more memory will be
// requested from the OS increasing heap size thus allowing future
// GCs more time to complete.
// The memstats.heap_alloc and memstats.next_gc reads have benign races:
// a false negative simply does not start a GC, a false positive
// will start a GC needlessly. Neither has correctness issues.
func shouldtriggergc() bool {
	return triggerratio*(int64(memstats.next_gc)-int64(memstats.heap_alloc)) <= int64(memstats.next_gc) && atomicloaduint(&bggc.working) == 0
}
|
|
|
|
|
2014-08-05 07:03:06 -06:00
|
|
|
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
//
// typ describes the pointer layout of the object for the GC bitmap;
// it may be nil only when flags carries flagNoScan (object has no pointers).
func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
	shouldhelpgc := false
	if size == 0 {
		// All zero-length allocations share a single address.
		return unsafe.Pointer(&zerobase)
	}
	dataSize := size

	if flags&flagNoScan == 0 && typ == nil {
		throw("malloc missing type")
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	mp.mallocing = 1

	c := gomcache()
	var s *mspan
	var x unsafe.Pointer
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// Tiny allocator.
			//
			// Tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (don't have pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// Size of the memory block used for combining (maxTinySize) is tunable.
			// Current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides less
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case winning is 8x regardless of block size.
			//
			// Objects obtained from tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from tiny allocator; in such case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces number of allocations by ~12% and
			// reduces heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != nil {
				// The object fits into existing tiny block.
				x = add(c.tiny, off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			s = c.alloc[tinySizeClass]
			v := s.freelist
			if v.ptr() == nil {
				// Local cache exhausted: refill it on the system stack.
				systemstack(func() {
					mCache_Refill(c, tinySizeClass)
				})
				shouldhelpgc = true
				s = c.alloc[tinySizeClass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			//TODO: prefetch v.next
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset {
				c.tiny = x
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			// Small (but not tiny) allocation: map the request to a size class.
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])
			s = c.alloc[sizeclass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					mCache_Refill(c, int32(sizeclass))
				})
				shouldhelpgc = true
				s = c.alloc[sizeclass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			//TODO: prefetch
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				v.ptr().next = 0
				// NOTE(review): the second-word test looks like a
				// "freshly zeroed block" heuristic that skips memclr — confirm.
				if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
					memclr(unsafe.Pointer(v), size)
				}
			}
		}
		c.local_cachealloc += intptr(size)
	} else {
		// Large allocation (> maxSmallSize): go straight to the heap.
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, uint32(flags))
		})
		x = unsafe.Pointer(uintptr(s.start << pageShift))
		size = uintptr(s.elemsize)
	}

	if flags&flagNoScan != 0 {
		// All objects are pre-marked as noscan. Nothing to do.
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
	}

	// GCmarkterminate allocates black.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase == _GCmarktermination {
		systemstack(func() {
			gcmarknewobject_m(uintptr(x))
		})
	}

	if mheap_.shadow_enabled {
		clearshadow(uintptr(x), size)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	// Allocation complete; allow preemption again.
	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	// Heap-profile sampling: charge this allocation against the
	// per-mcache countdown and record a sample when it expires.
	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if shouldtriggergc() {
		gogc(0)
	} else if shouldhelpgc && atomicloaduint(&bggc.working) == 1 {
		// bggc.lock not taken since race on bggc.working is benign.
		// At worst we don't call gchelpwork.
		// Delay the gchelpwork until the epilogue so that it doesn't
		// interfere with the inner working of malloc such as
		// mcache refills that might happen while doing the gchelpwork.
		systemstack(gchelpwork)
	}

	return x
}
|
|
|
|
|
|
|
|
// implementation of new builtin
|
|
|
|
func newobject(typ *_type) unsafe.Pointer {
|
2014-11-03 11:26:46 -07:00
|
|
|
flags := uint32(0)
|
2014-07-30 10:01:52 -06:00
|
|
|
if typ.kind&kindNoPointers != 0 {
|
|
|
|
flags |= flagNoScan
|
|
|
|
}
|
2014-09-08 23:08:34 -06:00
|
|
|
return mallocgc(uintptr(typ.size), typ, flags)
|
2014-07-30 10:01:52 -06:00
|
|
|
}
|
|
|
|
|
2014-12-22 11:27:53 -07:00
|
|
|
// reflect_unsafe_New is the runtime implementation of reflect.unsafe_New,
// exposed to the reflect package via go:linkname.
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}
|
|
|
|
|
2014-07-30 10:01:52 -06:00
|
|
|
// implementation of make builtin for slices
|
|
|
|
func newarray(typ *_type, n uintptr) unsafe.Pointer {
|
2014-11-03 11:26:46 -07:00
|
|
|
flags := uint32(0)
|
2014-07-30 10:01:52 -06:00
|
|
|
if typ.kind&kindNoPointers != 0 {
|
|
|
|
flags |= flagNoScan
|
|
|
|
}
|
2014-11-11 15:05:02 -07:00
|
|
|
if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
|
2014-07-30 10:01:52 -06:00
|
|
|
panic("runtime: allocation size out of range")
|
|
|
|
}
|
2014-09-08 23:08:34 -06:00
|
|
|
return mallocgc(uintptr(typ.size)*n, typ, flags)
|
2014-07-30 10:01:52 -06:00
|
|
|
}
|
|
|
|
|
2014-12-22 11:27:53 -07:00
|
|
|
// reflect_unsafe_NewArray is the runtime implementation of
// reflect.unsafe_NewArray, exposed to the reflect package via go:linkname.
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer {
	return newarray(typ, n)
}
|
|
|
|
|
2014-07-31 13:43:40 -06:00
|
|
|
// rawmem returns a chunk of pointerless memory. It is
// not zeroed (flagNoZero) and is never scanned by the GC (flagNoScan).
func rawmem(size uintptr) unsafe.Pointer {
	return mallocgc(size, nil, flagNoScan|flagNoZero)
}
|
|
|
|
|
2014-07-30 10:01:52 -06:00
|
|
|
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
|
|
|
|
c := mp.mcache
|
|
|
|
rate := MemProfileRate
|
|
|
|
if size < uintptr(rate) {
|
|
|
|
// pick next profile time
|
|
|
|
// If you change this, also change allocmcache.
|
|
|
|
if rate > 0x3fffffff { // make 2*rate not overflow
|
|
|
|
rate = 0x3fffffff
|
|
|
|
}
|
2014-09-02 15:33:33 -06:00
|
|
|
next := int32(fastrand1()) % (2 * int32(rate))
|
2014-07-30 10:01:52 -06:00
|
|
|
// Subtract the "remainder" of the current allocation.
|
|
|
|
// Otherwise objects that are close in size to sampling rate
|
|
|
|
// will be under-sampled, because we consistently discard this remainder.
|
|
|
|
next -= (int32(size) - c.next_sample)
|
|
|
|
if next < 0 {
|
|
|
|
next = 0
|
|
|
|
}
|
|
|
|
c.next_sample = next
|
|
|
|
}
|
2014-09-01 16:51:12 -06:00
|
|
|
|
|
|
|
mProf_Malloc(x, size)
|
2014-07-30 10:01:52 -06:00
|
|
|
}
|
|
|
|
|
2014-12-12 07:51:20 -07:00
|
|
|
// gcinstallmarkwb sets the GC phase to _GCmark.
// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
// all go routines see the new phase.
func gcinstallmarkwb() {
	gcphase = _GCmark
}
|
|
|
|
|
|
|
|
// gogc runs (or schedules) a garbage collection.
//
// force = 0 - start concurrent GC
// force = 1 - do STW GC regardless of current heap usage
// force = 2 - do STW GC and eager sweep
func gogc(force int32) {
	// The gc is turned off (via enablegc) until the bootstrap has completed.
	// Also, malloc gets called in the guts of a number of libraries that might be
	// holding locks. To avoid deadlocks during stoptheworld, don't bother
	// trying to run gc while holding a lock. The next mallocgc without a lock
	// will do the gc instead.

	mp := acquirem()
	if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
		releasem(mp)
		return
	}
	releasem(mp)
	mp = nil

	if force == 0 {
		// Concurrent GC: hand the cycle to the background goroutine,
		// starting it on first use, or waking it if it is idle.
		lock(&bggc.lock)
		if !bggc.started {
			bggc.working = 1
			bggc.started = true
			go backgroundgc()
		} else if bggc.working == 0 {
			bggc.working = 1
			ready(bggc.g)
		}
		unlock(&bggc.lock)
	} else {
		// Forced GC runs synchronously on this goroutine.
		gcwork(force)
	}
}
|
|
|
|
|
|
|
|
func gcwork(force int32) {
|
|
|
|
|
|
|
|
semacquire(&worldsema, false)
|
2014-07-30 10:01:52 -06:00
|
|
|
|
2014-12-12 07:51:20 -07:00
|
|
|
// Pick up the remaining unswept/not being swept spans concurrently
|
|
|
|
for gosweepone() != ^uintptr(0) {
|
|
|
|
sweep.nbgsweep++
|
|
|
|
}
|
|
|
|
|
2014-07-30 10:01:52 -06:00
|
|
|
// Ok, we're doing it! Stop everybody else
|
2014-12-12 07:51:20 -07:00
|
|
|
|
2015-01-06 12:58:49 -07:00
|
|
|
mp := acquirem()
|
2015-01-30 13:30:41 -07:00
|
|
|
mp.preemptoff = "gcing"
|
2014-08-21 01:46:53 -06:00
|
|
|
releasem(mp)
|
2014-12-12 07:51:20 -07:00
|
|
|
gctimer.count++
|
|
|
|
if force == 0 {
|
|
|
|
gctimer.cycle.sweepterm = nanotime()
|
|
|
|
}
|
2014-12-12 10:41:57 -07:00
|
|
|
|
|
|
|
if trace.enabled {
|
|
|
|
traceGoSched()
|
|
|
|
traceGCStart()
|
|
|
|
}
|
|
|
|
|
2015-01-06 12:58:49 -07:00
|
|
|
// Pick up the remaining unswept/not being swept spans before we STW
|
|
|
|
for gosweepone() != ^uintptr(0) {
|
|
|
|
sweep.nbgsweep++
|
|
|
|
}
|
[dev.cc] runtime: delete scalararg, ptrarg; rename onM to systemstack
Scalararg and ptrarg are not "signal safe".
Go code filling them out can be interrupted by a signal,
and then the signal handler runs, and if it also ends up
in Go code that uses scalararg or ptrarg, now the old
values have been smashed.
For the pieces of code that do need to run in a signal handler,
we introduced onM_signalok, which is really just onM
except that the _signalok is meant to convey that the caller
asserts that scalarg and ptrarg will be restored to their old
values after the call (instead of the usual behavior, zeroing them).
Scalararg and ptrarg are also untyped and therefore error-prone.
Go code can always pass a closure instead of using scalararg
and ptrarg; they were only really necessary for C code.
And there's no more C code.
For all these reasons, delete scalararg and ptrarg, converting
the few remaining references to use closures.
Once those are gone, there is no need for a distinction between
onM and onM_signalok, so replace both with a single function
equivalent to the current onM_signalok (that is, it can be called
on any of the curg, g0, and gsignal stacks).
The name onM and the phrase 'm stack' are misnomers,
because on most system an M has two system stacks:
the main thread stack and the signal handling stack.
Correct the misnomer by naming the replacement function systemstack.
Fix a few references to "M stack" in code.
The main motivation for this change is to eliminate scalararg/ptrarg.
Rick and I have already seen them cause problems because
the calling sequence m.ptrarg[0] = p is a heap pointer assignment,
so it gets a write barrier. The write barrier also uses onM, so it has
all the same problems as if it were being invoked by a signal handler.
We worked around this by saving and restoring the old values
and by calling onM_signalok, but there's no point in keeping this nice
home for bugs around any longer.
This CL also changes funcline to return the file name as a result
instead of filling in a passed-in *string. (The *string signature is
left over from when the code was written in and called from C.)
That's arguably an unrelated change, except that once I had done
the ptrarg/scalararg/onM cleanup I started getting false positives
about the *string argument escaping (not allowed in package runtime).
The compiler is wrong, but the easiest fix is to write the code like
Go code instead of like C code. I am a bit worried that the compiler
is wrong because of some use of uninitialized memory in the escape
analysis. If that's the reason, it will go away when we convert the
compiler to Go. (And if not, we'll debug it the next time.)
LGTM=khr
R=r, khr
CC=austin, golang-codereviews, iant, rlh
https://golang.org/cl/174950043
2014-11-12 12:54:31 -07:00
|
|
|
systemstack(stoptheworld)
|
2014-11-15 06:00:38 -07:00
|
|
|
systemstack(finishsweep_m) // finish sweep before we start concurrent scan.
|
2014-12-12 07:51:20 -07:00
|
|
|
if force == 0 { // Do as much work concurrently as possible
|
runtime: fix two garbage collector bugs
First, call clearcheckmarks immediately after changing checkmark,
so that there is less time when the checkmark flag and the bitmap
are inconsistent. The tiny gap between the two lines is fine, because
the world is stopped. Before, the gap was much larger and included
such code as "go bgsweep()", which allocated.
Second, modify gcphase only when the world is stopped.
As written, gcscan_m was changing gcphase from 0 to GCscan
and back to 0 while other goroutines were running.
Another goroutine running at the same time might decide to
sleep, see GCscan, call gcphasework, and start "helping" by
scanning its stack. That's fine, except that if gcphase flips back
to 0 as the goroutine calls scanblock, it will start draining the
work buffers prematurely.
Both of these were found wbshadow=2 (and a lot of hard work).
Eventually that will run automatically, but right now it still
doesn't quite work for all.bash, due to mmap conflicts with
pthread-created threads.
Change-Id: I99aa8210cff9c6e7d0a1b62c75be32a23321897b
Reviewed-on: https://go-review.googlesource.com/2340
Reviewed-by: Rick Hudson <rlh@golang.org>
2015-01-05 13:02:09 -07:00
|
|
|
gcphase = _GCscan
|
2014-11-15 06:00:38 -07:00
|
|
|
systemstack(starttheworld)
|
2014-12-12 07:51:20 -07:00
|
|
|
gctimer.cycle.scan = nanotime()
|
2014-11-10 11:42:34 -07:00
|
|
|
// Do a concurrent heap scan before we stop the world.
|
2014-11-15 06:00:38 -07:00
|
|
|
systemstack(gcscan_m)
|
2014-12-12 07:51:20 -07:00
|
|
|
gctimer.cycle.installmarkwb = nanotime()
|
2014-11-15 06:00:38 -07:00
|
|
|
systemstack(stoptheworld)
|
2015-01-06 12:58:49 -07:00
|
|
|
systemstack(gcinstallmarkwb)
|
2014-11-15 06:00:38 -07:00
|
|
|
systemstack(starttheworld)
|
2014-12-12 07:51:20 -07:00
|
|
|
gctimer.cycle.mark = nanotime()
|
2014-11-15 06:00:38 -07:00
|
|
|
systemstack(gcmark_m)
|
2014-12-12 07:51:20 -07:00
|
|
|
gctimer.cycle.markterm = nanotime()
|
2014-11-15 06:00:38 -07:00
|
|
|
systemstack(stoptheworld)
|
|
|
|
systemstack(gcinstalloffwb_m)
|
2015-01-26 11:51:39 -07:00
|
|
|
} else {
|
|
|
|
// For non-concurrent GC (force != 0) g stack have not been scanned so
|
|
|
|
// set gcscanvalid such that mark termination scans all stacks.
|
|
|
|
// No races here since we are in a STW phase.
|
|
|
|
for _, gp := range allgs {
|
|
|
|
gp.gcworkdone = false // set to true in gcphasework
|
|
|
|
gp.gcscanvalid = false // stack has not been scanned
|
|
|
|
}
|
2014-11-10 11:42:34 -07:00
|
|
|
}
|
2014-11-15 06:00:38 -07:00
|
|
|
|
2015-01-06 12:58:49 -07:00
|
|
|
startTime := nanotime()
|
2014-08-21 01:46:53 -06:00
|
|
|
if mp != acquirem() {
|
2014-12-27 21:58:00 -07:00
|
|
|
throw("gogc: rescheduled")
|
2014-08-21 01:46:53 -06:00
|
|
|
}
|
2014-07-30 10:01:52 -06:00
|
|
|
|
|
|
|
clearpools()
|
|
|
|
|
|
|
|
// Run gc on the g0 stack. We do this so that the g stack
|
|
|
|
// we're currently running on will no longer change. Cuts
|
|
|
|
// the root set down a bit (g0 stacks are not scanned, and
|
|
|
|
// we don't need to scan gc's internal state). We also
|
|
|
|
// need to switch to g0 so we can shrink the stack.
|
|
|
|
n := 1
|
|
|
|
if debug.gctrace > 1 {
|
|
|
|
n = 2
|
|
|
|
}
|
2014-11-15 06:00:38 -07:00
|
|
|
eagersweep := force >= 2
|
2014-07-30 10:01:52 -06:00
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
if i > 0 {
|
2015-01-06 12:58:49 -07:00
|
|
|
// refresh start time if doing a second GC
|
cmd/cc, runtime: preserve C runtime type names in generated Go
uintptr or uint64 in the runtime C were turning into uint in the Go,
bool was turning into uint8, and so on. Fix that.
Also delete Go wrappers for C functions.
The C functions can be called directly now
(but still eventually need to be converted to Go).
LGTM=bradfitz, minux, iant
R=golang-codereviews, bradfitz, iant, minux
CC=golang-codereviews, khr, r
https://golang.org/cl/138740043
2014-08-27 19:59:49 -06:00
|
|
|
startTime = nanotime()
|
2014-07-30 10:01:52 -06:00
|
|
|
}
|
|
|
|
// switch to g0, call gc, then switch back
|
[dev.cc] runtime: delete scalararg, ptrarg; rename onM to systemstack
Scalararg and ptrarg are not "signal safe".
Go code filling them out can be interrupted by a signal,
and then the signal handler runs, and if it also ends up
in Go code that uses scalararg or ptrarg, now the old
values have been smashed.
For the pieces of code that do need to run in a signal handler,
we introduced onM_signalok, which is really just onM
except that the _signalok is meant to convey that the caller
asserts that scalarg and ptrarg will be restored to their old
values after the call (instead of the usual behavior, zeroing them).
Scalararg and ptrarg are also untyped and therefore error-prone.
Go code can always pass a closure instead of using scalararg
and ptrarg; they were only really necessary for C code.
And there's no more C code.
For all these reasons, delete scalararg and ptrarg, converting
the few remaining references to use closures.
Once those are gone, there is no need for a distinction between
onM and onM_signalok, so replace both with a single function
equivalent to the current onM_signalok (that is, it can be called
on any of the curg, g0, and gsignal stacks).
The name onM and the phrase 'm stack' are misnomers,
because on most system an M has two system stacks:
the main thread stack and the signal handling stack.
Correct the misnomer by naming the replacement function systemstack.
Fix a few references to "M stack" in code.
The main motivation for this change is to eliminate scalararg/ptrarg.
Rick and I have already seen them cause problems because
the calling sequence m.ptrarg[0] = p is a heap pointer assignment,
so it gets a write barrier. The write barrier also uses onM, so it has
all the same problems as if it were being invoked by a signal handler.
We worked around this by saving and restoring the old values
and by calling onM_signalok, but there's no point in keeping this nice
home for bugs around any longer.
This CL also changes funcline to return the file name as a result
instead of filling in a passed-in *string. (The *string signature is
left over from when the code was written in and called from C.)
That's arguably an unrelated change, except that once I had done
the ptrarg/scalararg/onM cleanup I started getting false positives
about the *string argument escaping (not allowed in package runtime).
The compiler is wrong, but the easiest fix is to write the code like
Go code instead of like C code. I am a bit worried that the compiler
is wrong because of some use of uninitialized memory in the escape
analysis. If that's the reason, it will go away when we convert the
compiler to Go. (And if not, we'll debug it the next time.)
LGTM=khr
R=r, khr
CC=austin, golang-codereviews, iant, rlh
https://golang.org/cl/174950043
2014-11-12 12:54:31 -07:00
|
|
|
systemstack(func() {
|
|
|
|
gc_m(startTime, eagersweep)
|
|
|
|
})
|
2014-07-30 10:01:52 -06:00
|
|
|
}
|
|
|
|
|
2014-11-15 06:00:38 -07:00
|
|
|
systemstack(func() {
|
|
|
|
gccheckmark_m(startTime, eagersweep)
|
|
|
|
})
|
2014-11-04 11:31:34 -07:00
|
|
|
|
2014-12-12 10:41:57 -07:00
|
|
|
if trace.enabled {
|
|
|
|
traceGCDone()
|
|
|
|
traceGoStart()
|
|
|
|
}
|
|
|
|
|
2014-07-30 10:01:52 -06:00
|
|
|
// all done
|
2015-01-30 13:30:41 -07:00
|
|
|
mp.preemptoff = ""
|
2014-12-12 07:51:20 -07:00
|
|
|
|
|
|
|
if force == 0 {
|
|
|
|
gctimer.cycle.sweep = nanotime()
|
|
|
|
}
|
|
|
|
|
2014-07-30 10:01:52 -06:00
|
|
|
semrelease(&worldsema)
|
2014-12-12 07:51:20 -07:00
|
|
|
|
|
|
|
if force == 0 {
|
|
|
|
if gctimer.verbose > 1 {
|
|
|
|
GCprinttimes()
|
|
|
|
} else if gctimer.verbose > 0 {
|
|
|
|
calctimes() // ignore result
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
[dev.cc] runtime: delete scalararg, ptrarg; rename onM to systemstack
Scalararg and ptrarg are not "signal safe".
Go code filling them out can be interrupted by a signal,
and then the signal handler runs, and if it also ends up
in Go code that uses scalararg or ptrarg, now the old
values have been smashed.
For the pieces of code that do need to run in a signal handler,
we introduced onM_signalok, which is really just onM
except that the _signalok is meant to convey that the caller
asserts that scalarg and ptrarg will be restored to their old
values after the call (instead of the usual behavior, zeroing them).
Scalararg and ptrarg are also untyped and therefore error-prone.
Go code can always pass a closure instead of using scalararg
and ptrarg; they were only really necessary for C code.
And there's no more C code.
For all these reasons, delete scalararg and ptrarg, converting
the few remaining references to use closures.
Once those are gone, there is no need for a distinction between
onM and onM_signalok, so replace both with a single function
equivalent to the current onM_signalok (that is, it can be called
on any of the curg, g0, and gsignal stacks).
The name onM and the phrase 'm stack' are misnomers,
because on most system an M has two system stacks:
the main thread stack and the signal handling stack.
Correct the misnomer by naming the replacement function systemstack.
Fix a few references to "M stack" in code.
The main motivation for this change is to eliminate scalararg/ptrarg.
Rick and I have already seen them cause problems because
the calling sequence m.ptrarg[0] = p is a heap pointer assignment,
so it gets a write barrier. The write barrier also uses onM, so it has
all the same problems as if it were being invoked by a signal handler.
We worked around this by saving and restoring the old values
and by calling onM_signalok, but there's no point in keeping this nice
home for bugs around any longer.
This CL also changes funcline to return the file name as a result
instead of filling in a passed-in *string. (The *string signature is
left over from when the code was written in and called from C.)
That's arguably an unrelated change, except that once I had done
the ptrarg/scalararg/onM cleanup I started getting false positives
about the *string argument escaping (not allowed in package runtime).
The compiler is wrong, but the easiest fix is to write the code like
Go code instead of like C code. I am a bit worried that the compiler
is wrong because of some use of uninitialized memory in the escape
analysis. If that's the reason, it will go away when we convert the
compiler to Go. (And if not, we'll debug it the next time.)
LGTM=khr
R=r, khr
CC=austin, golang-codereviews, iant, rlh
https://golang.org/cl/174950043
2014-11-12 12:54:31 -07:00
|
|
|
systemstack(starttheworld)
|
2014-12-12 07:51:20 -07:00
|
|
|
|
2014-07-30 10:01:52 -06:00
|
|
|
releasem(mp)
|
2014-08-21 01:46:53 -06:00
|
|
|
mp = nil
|
2014-07-30 10:01:52 -06:00
|
|
|
|
|
|
|
// now that gc is done, kick off finalizer thread if needed
|
|
|
|
if !concurrentSweep {
|
|
|
|
// give the queued finalizers, if any, a chance to run
|
2014-09-11 14:22:21 -06:00
|
|
|
Gosched()
|
2014-07-30 10:01:52 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-12 07:51:20 -07:00
|
|
|
// gctimes records the time in nanoseconds of each phase of the concurrent GC.
// Phases tagged "stw" run with the world stopped; the others run concurrently
// with mutators (see the phase sequence driven from gogc).
type gctimes struct {
	sweepterm     int64 // stw
	scan          int64
	installmarkwb int64 // stw
	mark          int64
	markterm      int64 // stw
	sweep         int64
}
|
|
|
|
|
|
|
|
// gcchronograph holds timer information related to GC phases
// max records the maximum time spent in each GC phase since GCstarttimes.
// total records the total time spent in each GC phase since GCstarttimes.
// cycle records the absolute time (as returned by nanoseconds()) that each GC phase last started at.
type gcchronograph struct {
	count    int64   // number of completed GC cycles recorded
	verbose  int64   // 0 disables timing output; higher values print more (see GCprinttimes)
	maxpause int64   // longest single stop-the-world pause observed
	max      gctimes // per-phase maximum elapsed times
	total    gctimes // per-phase accumulated elapsed times
	cycle    gctimes // per-phase start timestamps for the current/most recent cycle
}

// gctimer is the package-wide GC chronograph, filled in by gogc and
// summarized by calctimes/GCprinttimes.
var gctimer gcchronograph
|
|
|
|
|
2015-01-06 12:58:49 -07:00
|
|
|
// GCstarttimes initializes the gc times. All previous times are lost.
|
2014-12-12 07:51:20 -07:00
|
|
|
func GCstarttimes(verbose int64) {
|
|
|
|
gctimer = gcchronograph{verbose: verbose}
|
|
|
|
}
|
|
|
|
|
|
|
|
// GCendtimes stops the gc timers.
func GCendtimes() {
	// verbose == 0 disables timing output in gogc and GCprinttimes;
	// accumulated totals/maxima are retained.
	gctimer.verbose = 0
}
|
|
|
|
|
|
|
|
// calctimes converts gctimer.cycle into the elapsed times, updates gctimer.total
|
|
|
|
// and updates gctimer.max with the max pause time.
|
|
|
|
func calctimes() gctimes {
|
|
|
|
var times gctimes
|
|
|
|
|
|
|
|
var max = func(a, b int64) int64 {
|
|
|
|
if a > b {
|
|
|
|
return a
|
|
|
|
}
|
|
|
|
return b
|
|
|
|
}
|
|
|
|
|
|
|
|
times.sweepterm = gctimer.cycle.scan - gctimer.cycle.sweepterm
|
|
|
|
gctimer.total.sweepterm += times.sweepterm
|
|
|
|
gctimer.max.sweepterm = max(gctimer.max.sweepterm, times.sweepterm)
|
|
|
|
gctimer.maxpause = max(gctimer.maxpause, gctimer.max.sweepterm)
|
|
|
|
|
|
|
|
times.scan = gctimer.cycle.installmarkwb - gctimer.cycle.scan
|
|
|
|
gctimer.total.scan += times.scan
|
|
|
|
gctimer.max.scan = max(gctimer.max.scan, times.scan)
|
|
|
|
|
|
|
|
times.installmarkwb = gctimer.cycle.mark - gctimer.cycle.installmarkwb
|
|
|
|
gctimer.total.installmarkwb += times.installmarkwb
|
|
|
|
gctimer.max.installmarkwb = max(gctimer.max.installmarkwb, times.installmarkwb)
|
|
|
|
gctimer.maxpause = max(gctimer.maxpause, gctimer.max.installmarkwb)
|
|
|
|
|
|
|
|
times.mark = gctimer.cycle.markterm - gctimer.cycle.mark
|
|
|
|
gctimer.total.mark += times.mark
|
|
|
|
gctimer.max.mark = max(gctimer.max.mark, times.mark)
|
|
|
|
|
|
|
|
times.markterm = gctimer.cycle.sweep - gctimer.cycle.markterm
|
|
|
|
gctimer.total.markterm += times.markterm
|
|
|
|
gctimer.max.markterm = max(gctimer.max.markterm, times.markterm)
|
|
|
|
gctimer.maxpause = max(gctimer.maxpause, gctimer.max.markterm)
|
|
|
|
|
|
|
|
return times
|
|
|
|
}
|
|
|
|
|
|
|
|
// GCprinttimes prints latency information in nanoseconds about various
|
|
|
|
// phases in the GC. The information for each phase includes the maximum pause
|
|
|
|
// and total time since the most recent call to GCstarttimes as well as
|
|
|
|
// the information from the most recent Concurent GC cycle. Calls from the
|
|
|
|
// application to runtime.GC() are ignored.
|
|
|
|
func GCprinttimes() {
|
2015-01-06 12:58:49 -07:00
|
|
|
if gctimer.verbose == 0 {
|
|
|
|
println("GC timers not enabled")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-01-07 13:34:02 -07:00
|
|
|
// Explicitly put times on the heap so printPhase can use it.
|
|
|
|
times := new(gctimes)
|
|
|
|
*times = calctimes()
|
2014-12-12 07:51:20 -07:00
|
|
|
cycletime := gctimer.cycle.sweep - gctimer.cycle.sweepterm
|
2015-01-07 13:34:02 -07:00
|
|
|
pause := times.sweepterm + times.installmarkwb + times.markterm
|
|
|
|
gomaxprocs := GOMAXPROCS(-1)
|
|
|
|
|
|
|
|
printlock()
|
|
|
|
print("GC: #", gctimer.count, " ", cycletime, "ns @", gctimer.cycle.sweepterm, " pause=", pause, " maxpause=", gctimer.maxpause, " goroutines=", allglen, " gomaxprocs=", gomaxprocs, "\n")
|
|
|
|
printPhase := func(label string, get func(*gctimes) int64, procs int) {
|
|
|
|
print("GC: ", label, " ", get(times), "ns\tmax=", get(&gctimer.max), "\ttotal=", get(&gctimer.total), "\tprocs=", procs, "\n")
|
|
|
|
}
|
|
|
|
printPhase("sweep term:", func(t *gctimes) int64 { return t.sweepterm }, gomaxprocs)
|
|
|
|
printPhase("scan: ", func(t *gctimes) int64 { return t.scan }, 1)
|
|
|
|
printPhase("install wb:", func(t *gctimes) int64 { return t.installmarkwb }, gomaxprocs)
|
|
|
|
printPhase("mark: ", func(t *gctimes) int64 { return t.mark }, 1)
|
|
|
|
printPhase("mark term: ", func(t *gctimes) int64 { return t.markterm }, gomaxprocs)
|
|
|
|
printunlock()
|
2014-12-12 07:51:20 -07:00
|
|
|
}
|
|
|
|
|
2014-07-30 10:01:52 -06:00
|
|
|
// GC runs a garbage collection.
func GC() {
	// force == 2 requests a blocking collection with eager sweeping
	// (gogc treats force >= 2 as eagersweep).
	gogc(2)
}
|
|
|
|
|
2014-10-06 12:18:09 -06:00
|
|
|
// linker-provided
// Zero-size symbols whose addresses are set by the linker; presumably they
// delimit the pointer-free data and bss sections — TODO confirm against the
// linker's symbol table.
var noptrdata struct{}
var enoptrdata struct{}
var noptrbss struct{}
var enoptrbss struct{}
|
|
|
|
|
2014-08-28 14:23:10 -06:00
|
|
|
// round n up to a multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
	// For a power of two, a-1 masks the low bits; adding it and then
	// clearing those bits rounds n up to the next multiple of a.
	mask := a - 1
	return (n + mask) &^ mask
}
|
|
|
|
|
2014-09-03 22:54:06 -06:00
|
|
|
// persistent is the state of the persistentalloc bump allocator:
// a single current chunk (base) and the offset of the next free byte (off),
// both guarded by lock.
var persistent struct {
	lock mutex
	base unsafe.Pointer // current chunk, obtained from sysAlloc
	off  uintptr        // allocation offset within base
}
|
|
|
|
|
|
|
|
// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
//
// Small requests are bump-allocated out of a shared 256K chunk under
// persistent.lock; requests of maxBlock or more go straight to sysAlloc.
// Throws on size == 0, on a non-power-of-two or over-page alignment, and
// when the system is out of memory.
func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	// Large requests bypass the shared chunk entirely.
	if size >= maxBlock {
		return sysAlloc(size, stat)
	}

	lock(&persistent.lock)
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		// Start a fresh chunk; any unused tail of the previous chunk
		// is abandoned (never freed).
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			unlock(&persistent.lock)
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	unlock(&persistent.lock)

	// The chunk was charged to other_sys when it was mapped; reattribute
	// this allocation's bytes to the caller's stat.
	if stat != &memstats.other_sys {
		xadd64(stat, int64(size))
		xadd64(&memstats.other_sys, -int64(size))
	}
	return p
}
|