runtime,sync: use fastrandn instead of modulo reduction
fastrandn is ~50% faster than fastrand() % n.
`ack -v 'fastrand\(\)\s?\%'` finds all modulo operations on fastrand().

name         old time/op  new time/op  delta
Fastrandn/2  2.86ns ± 0%  1.59ns ± 0%  -44.35%  (p=0.000 n=9+10)
Fastrandn/3  2.87ns ± 1%  1.59ns ± 0%  -44.41%  (p=0.000 n=10+9)
Fastrandn/4  2.87ns ± 1%  1.58ns ± 1%  -45.10%  (p=0.000 n=10+10)
Fastrandn/5  2.86ns ± 1%  1.58ns ± 1%  -44.84%  (p=0.000 n=10+10)

Change-Id: Ic91f5ca9b9e3b65127bc34792b62fd64fbd13b5c
Reviewed-on: https://go-review.googlesource.com/c/go/+/353269
Trust: Meng Zhuo <mzh@golangcn.org>
Run-TryBot: Meng Zhuo <mzh@golangcn.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
commit ecb2f231fa (parent 6f74ed06c5)
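The whole change rides on one observation: reducing a 32-bit random value into [0, n) as (x * n) >> 32 takes one widening multiply and one shift, while x % n takes an integer division, which is much slower on most CPUs. This is Lemire's interval-reduction trick. A minimal sketch of the two reductions (function names here are illustrative, not the runtime's):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // reduceMod maps a 32-bit random value into [0, n) using the
    // integer division that `% n` compiles to.
    func reduceMod(x, n uint32) uint32 { return x % n }

    // reduceMulShift maps x into [0, n) as (x * n) / 2^32, i.e. one
    // widening multiply and one shift.
    func reduceMulShift(x, n uint32) uint32 {
        return uint32(uint64(x) * uint64(n) >> 32)
    }

    func main() {
        for i := 0; i < 5; i++ {
            x := rand.Uint32()
            fmt.Println(reduceMod(x, 5), reduceMulShift(x, 5))
        }
    }

Both reductions are slightly biased when n is not a power of two; that is acceptable at the call sites below, which only need a cheap, roughly uniform choice.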
@@ -1301,7 +1301,7 @@ func fastexprand(mean int) int32 {
 	// x = -log_e(q) * mean
 	// x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
 	const randomBitCount = 26
-	q := fastrand()%(1<<randomBitCount) + 1
+	q := fastrandn(1<<randomBitCount) + 1
 	qlog := fastlog2(float64(q)) - randomBitCount
 	if qlog > 0 {
 		qlog = 0
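For context on this hunk: fastexprand draws exponentially distributed sampling intervals. q is uniform on [1, 2^26], so fastlog2(q) - 26 = log_2(q/2^26) is the log of a uniform (0, 1] variate u, and scaling by -log_e(2) * mean turns it into -ln(u) * mean, an exponential variate with the requested mean. A sketch of the same math using the standard library (an illustrative rewrite, not the runtime's code):

    package main

    import (
        "fmt"
        "math"
        "math/rand"
    )

    // expRand mirrors the shape of runtime.fastexprand, with math/rand
    // and math.Log2 standing in for fastrand and fastlog2.
    func expRand(mean int) int32 {
        const randomBitCount = 26
        q := rand.Uint32()%(1<<randomBitCount) + 1    // uniform in [1, 2^26]
        qlog := math.Log2(float64(q)) - randomBitCount // log2 of a uniform (0, 1] variate
        if qlog > 0 { // guard against rounding pushing the log above 0
            qlog = 0
        }
        const minusLog2 = -0.6931471805599453 // -ln(2)
        return int32(qlog*minusLog2*float64(mean)) + 1
    }

    func main() {
        sum := 0.0
        const n = 200000
        for i := 0; i < n; i++ {
            sum += float64(expRand(100))
        }
        fmt.Println("observed mean ≈", sum/n) // should land near 100
    }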
@@ -1319,7 +1319,7 @@ func nextSampleNoFP() uintptr {
 		rate = 0x3fffffff
 	}
 	if rate != 0 {
-		return uintptr(fastrand() % uint32(2*rate))
+		return uintptr(fastrandn(uint32(2 * rate)))
 	}
 	return 0
 }
@@ -974,7 +974,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	// machine instructions.
 
 	outOfPlace := false
-	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
+	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrandn(2) == 0) {
 		// This object spans heap arenas, so the bitmap may be
 		// discontiguous. Unroll it into the object instead
 		// and then copy it out.
@@ -5840,7 +5840,7 @@ const randomizeScheduler = raceenabled
 // If the run queue is full, runnext puts g on the global queue.
 // Executed only by the owner P.
 func runqput(_p_ *p, gp *g, next bool) {
-	if randomizeScheduler && next && fastrand()%2 == 0 {
+	if randomizeScheduler && next && fastrandn(2) == 0 {
 		next = false
 	}
 
@@ -156,8 +156,8 @@ func fastrandn(n uint32) uint32 {
 	return uint32(uint64(fastrand()) * uint64(n) >> 32)
 }
 
-//go:linkname sync_fastrand sync.fastrand
-func sync_fastrand() uint32 { return fastrand() }
+//go:linkname sync_fastrandn sync.fastrandn
+func sync_fastrandn(n uint32) uint32 { return fastrandn(n) }
 
 //go:linkname net_fastrand net.fastrand
 func net_fastrand() uint32 { return fastrand() }
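The hunk above shows the entire implementation of fastrandn: one widening multiply and one shift. A hypothetical benchmark along these lines reproduces the flavor of the table in the commit message outside the runtime (xorshift32 stands in for the unexported fastrand; this is not the CL's actual benchmark):

    package reduce

    import "testing"

    // xorshift32 is a stand-in PRNG for the runtime's unexported fastrand.
    func xorshift32(s *uint32) uint32 {
        x := *s
        x ^= x << 13
        x ^= x >> 17
        x ^= x << 5
        *s = x
        return x
    }

    // n lives in a package-level variable so the compiler cannot
    // strength-reduce a constant modulo into multiplies on its own.
    var n uint32 = 5

    var sink uint32 // keeps the loop bodies from being optimized away

    func BenchmarkMod(b *testing.B) {
        s := uint32(1)
        for i := 0; i < b.N; i++ {
            sink = xorshift32(&s) % n // division-based reduction
        }
    }

    func BenchmarkMulShift(b *testing.B) {
        s := uint32(1)
        for i := 0; i < b.N; i++ {
            sink = uint32(uint64(xorshift32(&s)) * uint64(n) >> 32) // multiply-shift reduction
        }
    }

Run it with `go test -bench=.` to see the division-versus-multiply gap on your own hardware.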
@@ -921,7 +921,7 @@ func pcvalue(f funcInfo, off uint32, targetpc uintptr, cache *pcvalueCache, stri
 	if cache != nil {
 		x := pcvalueCacheKey(targetpc)
 		e := &cache.entries[x]
-		ci := fastrand() % uint32(len(cache.entries[x]))
+		ci := fastrandn(uint32(len(cache.entries[x])))
 		e[ci] = e[0]
 		e[0] = pcvalueCacheEnt{
 			targetpc: targetpc,
@@ -71,7 +71,7 @@ type poolLocal struct {
 }
 
 // from runtime
-func fastrand() uint32
+func fastrandn(n uint32) uint32
 
 var poolRaceHash [128]uint64
 
@@ -92,7 +92,7 @@ func (p *Pool) Put(x interface{}) {
 		return
 	}
 	if race.Enabled {
-		if fastrand()%4 == 0 {
+		if fastrandn(4) == 0 {
 			// Randomly drop x on floor.
 			return
 		}