runtime: rename fastrand1 to fastrand
Change-Id: I37706ff0a3486827c5b072c95ad890ea87ede847
Reviewed-on: https://go-review.googlesource.com/28210
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent: f9dafc742d
commit: 2b74de3ed9
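The rename below is mechanical, but it helps to know what the function computes. The assembly hunks all implement the same per-M generator: double m.fastrand and, when the doubled value has its sign bit set, fold in a feedback constant (the ADD.S / ADD R0, R0 doubling step is visible in the ARM and ARM64 hunks). A minimal, runnable Go sketch of that step follows; the 0x88888eef feedback constant is assumed from runtime sources of this era, and the package-level state variable stands in for the per-M m.fastrand word, which the runtime seeds from cputicks() as the diff's own comments note.

package main

import "fmt"

// fastrandState stands in for the per-M m.fastrand word; illustrative only.
// The runtime seeds it from cputicks().
var fastrandState uint32 = 1

// fastrand sketches the step the asm_*.s hunks implement: shift left one
// bit, then XOR in a feedback constant when the result's sign bit is set.
func fastrand() uint32 {
	x := fastrandState
	x += x // x <<= 1, possibly setting the sign bit
	if int32(x) < 0 {
		x ^= 0x88888eef // feedback constant (assumed, not confirmed by this diff)
	}
	fastrandState = x
	return x
}

func main() {
	for i := 0; i < 4; i++ {
		fmt.Println(fastrand())
	}
}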
@@ -109,7 +109,7 @@ func f32hash(p unsafe.Pointer, h uintptr) uintptr {
 	case f == 0:
 		return c1 * (c0 ^ h) // +0, -0
 	case f != f:
-		return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
+		return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
 	default:
 		return memhash(p, h, 4)
 	}
@@ -121,7 +121,7 @@ func f64hash(p unsafe.Pointer, h uintptr) uintptr {
 	case f == 0:
 		return c1 * (c0 ^ h) // +0, -0
 	case f != f:
-		return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
+		return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
 	default:
 		return memhash(p, h, 8)
 	}
@@ -1573,7 +1573,7 @@ allsame:
 	MOVL BX, (AX)
 	RET

-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVL g(CX), AX
 	MOVL g_m(AX), AX
@@ -2052,7 +2052,7 @@ eqret:
 	MOVB $0, ret+48(FP)
 	RET

-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVQ g(CX), AX
 	MOVQ g_m(AX), AX
@@ -973,7 +973,7 @@ eqret:
 	MOVB AX, ret+24(FP)
 	RET

-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	get_tls(CX)
 	MOVL g(CX), AX
 	MOVL g_m(AX), AX
@@ -952,7 +952,7 @@ _sib_notfound:
 	MOVW R0, ret+12(FP)
 	RET

-TEXT runtime·fastrand1(SB),NOSPLIT,$-4-4
+TEXT runtime·fastrand(SB),NOSPLIT,$-4-4
 	MOVW g_m(g), R1
 	MOVW m_fastrand(R1), R0
 	ADD.S R0, R0
@@ -949,7 +949,7 @@ equal:
 	MOVB R0, ret+48(FP)
 	RET

-TEXT runtime·fastrand1(SB),NOSPLIT,$-8-4
+TEXT runtime·fastrand(SB),NOSPLIT,$-8-4
 	MOVD g_m(g), R1
 	MOVWU m_fastrand(R1), R0
 	ADD R0, R0
@@ -822,7 +822,7 @@ notfound:
 	MOVV R1, ret+24(FP)
 	RET

-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	MOVV g_m(g), R2
 	MOVWU m_fastrand(R2), R1
 	ADDU R1, R1
@@ -1042,7 +1042,7 @@ samebytes:
 	MOVD R8, (R7)
 	RET

-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	MOVD g_m(g), R4
 	MOVWZ m_fastrand(R4), R3
 	ADD R3, R3
@@ -874,7 +874,7 @@ TEXT runtime·memeqbodyclc(SB),NOSPLIT|NOFRAME,$0-0
 	CLC $1, 0(R3), 0(R5)
 	RET

-TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+TEXT runtime·fastrand(SB), NOSPLIT, $0-4
 	MOVD g_m(g), R4
 	MOVWZ m_fastrand(R4), R3
 	ADD R3, R3
@@ -256,7 +256,7 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
 	h.count = 0
 	h.B = B
 	h.flags = 0
-	h.hash0 = fastrand1()
+	h.hash0 = fastrand()
 	h.buckets = buckets
 	h.oldbuckets = nil
 	h.nevacuate = 0
@@ -655,9 +655,9 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	}

 	// decide where to start
-	r := uintptr(fastrand1())
+	r := uintptr(fastrand())
 	if h.B > 31-bucketCntBits {
-		r += uintptr(fastrand1()) << 31
+		r += uintptr(fastrand()) << 31
 	}
 	it.startBucket = r & (uintptr(1)<<h.B - 1)
 	it.offset = uint8(r >> h.B & (bucketCnt - 1))
@@ -843,7 +843,7 @@ func nextSample() int32 {
 	// x = -log_e(q) * period
 	// x = log_2(q) * (-log_e(2)) * period ; Using log_2 for efficiency
 	const randomBitCount = 26
-	q := fastrand1()%(1<<randomBitCount) + 1
+	q := fastrand()%(1<<randomBitCount) + 1
 	qlog := fastlog2(float64(q)) - randomBitCount
 	if qlog > 0 {
 		qlog = 0
@@ -861,7 +861,7 @@ func nextSampleNoFP() int32 {
 		rate = 0x3fffffff
 	}
 	if rate != 0 {
-		return int32(int(fastrand1()) % (2 * rate))
+		return int32(int(fastrand()) % (2 * rate))
 	}
 	return 0
 }
@@ -616,7 +616,7 @@ func (c *gcControllerState) enlistWorker() {
 	}
 	myID := gp.m.p.ptr().id
 	for tries := 0; tries < 5; tries++ {
-		id := int32(fastrand1() % uint32(gomaxprocs-1))
+		id := int32(fastrand() % uint32(gomaxprocs-1))
 		if id >= myID {
 			id++
 		}
@@ -289,7 +289,7 @@ func blockevent(cycles int64, skip int) {
 		cycles = 1
 	}
 	rate := int64(atomic.Load64(&blockprofilerate))
-	if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
+	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
 		return
 	}
 	gp := getg()
@@ -17,8 +17,8 @@ func checkgoarm() {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -6,8 +6,8 @@ package runtime

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -17,8 +17,8 @@ func checkgoarm() {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -53,8 +53,8 @@ func archauxv(tag, val uintptr) {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand1.
+	// randomNumber provides better seeding of fastrand.
 	return nanotime() + int64(randomNumber)
 }
@@ -19,8 +19,8 @@ func archauxv(tag, val uintptr) {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand1.
+	// randomNumber provides better seeding of fastrand.
 	return nanotime() + int64(randomNumber)
 }
@@ -22,9 +22,9 @@ func archauxv(tag, val uintptr) {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand1.
+	// randomNumber provides better seeding of fastrand.
 	return nanotime() + int64(randomNumber)
 }

@@ -16,8 +16,8 @@ func checkgoarm() {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -28,8 +28,8 @@ func checkgoarm() {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -17,8 +17,8 @@ func checkgoarm() {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -10,8 +10,8 @@ func checkgoarm() {

 //go:nosplit
 func cputicks() int64 {
-	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand1.
+	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }
@@ -1909,7 +1909,7 @@ top:
 		atomic.Xadd(&sched.nmspinning, 1)
 	}
 	for i := 0; i < 4; i++ {
-		for enum := stealOrder.start(fastrand1()); !enum.done(); enum.next() {
+		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
 			if sched.gcwaiting != 0 {
 				goto top
 			}
@@ -4034,7 +4034,7 @@ const randomizeScheduler = raceenabled
 // If the run queue is full, runnext puts g on the global queue.
 // Executed only by the owner P.
 func runqput(_p_ *p, gp *g, next bool) {
-	if randomizeScheduler && next && fastrand1()%2 == 0 {
+	if randomizeScheduler && next && fastrand()%2 == 0 {
 		next = false
 	}

@@ -4087,7 +4087,7 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool {

 	if randomizeScheduler {
 		for i := uint32(1); i <= n; i++ {
-			j := fastrand1() % (i + 1)
+			j := fastrand() % (i + 1)
 			batch[i], batch[j] = batch[j], batch[i]
 		}
 	}
@@ -270,7 +270,7 @@ func selectgoImpl(sel *hselect) (uintptr, uint16) {
 	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
 	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
 	for i := 1; i < int(sel.ncase); i++ {
-		j := int(fastrand1()) % (i + 1)
+		j := int(fastrand()) % (i + 1)
 		pollorder[i] = pollorder[j]
 		pollorder[j] = uint16(i)
 	}
@@ -81,7 +81,7 @@ func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
 var hashLoad = loadFactor

 // in asm_*.s
-func fastrand1() uint32
+func fastrand() uint32

 // in asm_*.s
 //go:noescape
@@ -437,7 +437,7 @@ func pcvalue(f *_func, off int32, targetpc uintptr, cache *pcvalueCache, strict
 	// a recursive stack's cycle is slightly
 	// larger than the cache.
 	if cache != nil {
-		ci := fastrand1() % uint32(len(cache.entries))
+		ci := fastrand() % uint32(len(cache.entries))
 		cache.entries[ci] = pcvalueCacheEnt{
 			targetpc: targetpc,
 			off:      off,
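One call site above carries real math: nextSample draws the distance to the next heap-profile sample from an exponential distribution, and the diff's own comments give the derivation (x = -log_e(q) * period, computed via log_2 because fastlog2 is cheap). A sketch of the same computation in portable Go, assuming a period parameter in place of the runtime's sampling rate; math.Log2 and math/rand stand in for the runtime's fastlog2 and fastrand, and the fixed-point details are omitted.

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// nextSampleSketch mirrors the derivation in the diff's comments:
//   x = -log_e(q) * period
//   x = log_2(q) * (-log_e(2)) * period   (log_2 is cheaper to compute)
// q is uniform in [1, 2^26], so q/2^26 is uniform in (0, 1] and x is
// exponentially distributed with mean period.
func nextSampleSketch(period float64) int32 {
	const randomBitCount = 26
	q := rand.Int31n(1<<randomBitCount) + 1
	qlog := math.Log2(float64(q)) - randomBitCount // log_2(q / 2^26) <= 0
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -math.Ln2
	return int32(qlog*minusLog2*period) + 1
}

func main() {
	// The sample distances should average out to roughly the period.
	sum := 0.0
	for i := 0; i < 100000; i++ {
		sum += float64(nextSampleSketch(512 * 1024))
	}
	fmt.Printf("mean sample distance: %.0f (period 524288)\n", sum/100000)
}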