
runtime: consistently seed fastrand state across archs

Some, but not all, architectures mix in OS-provided random seeds when
initializing the fastrand state. The others have TODOs saying we need
to do the same. Lift that logic up into the architecture-independent
part, and use memhash to mix the seed instead of a simple addition.

Previously, dumping the fastrand state at initialization would yield
something like the following on linux-amd64, where the values in the
first column do not change between runs (as thread IDs are sequential
and always start at 0), and the values in the second column, while
changing every run, are pretty correlated:

first run:

0x0 0x44d82f1c
0x5f356495 0x44f339de
0xbe6ac92a 0x44f91cd8
0x1da02dbf 0x44fd91bc
0x7cd59254 0x44fee8a4
0xdc0af6e9 0x4547a1e0
0x3b405b7e 0x474c76fc
0x9a75c013 0x475309dc
0xf9ab24a8 0x4bffd075

second run:

0x0 0xa63fc3eb
0x5f356495 0xa6648dc2
0xbe6ac92a 0xa66c1c59
0x1da02dbf 0xa671bce8
0x7cd59254 0xa70e8287
0xdc0af6e9 0xa7129d2e
0x3b405b7e 0xa7379e2d
0x9a75c013 0xa7e4c64c
0xf9ab24a8 0xa7ecce07

With this change, we get initial states that appear to be much more
unpredictable, both within the same run as well as between runs:

0x11bddad7 0x97241c63
0x553dacc6 0x2bcd8523
0x62c01085 0x16413d92
0x6f40e9e6 0x7a138de6
0xa4898053 0x70d816f0
0x5ca5b433 0x188a395b
0x62778ca9 0xd462c3b5
0xd6e160e4 0xac9b4bd
0xb9571d65 0x597a981d
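
To make the effect concrete, here is a minimal stand-alone sketch of the
two seeding schemes (not the runtime code itself): hash/maphash stands in
for the runtime's unexported memhash-based int64Hash, the hash64 helper is
invented for the demo, and the loop plays the role of sequential M ids.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/maphash"
)

// hash64 plays the role of the runtime's int64Hash: hash the 8 bytes
// of x under a per-process random seed.
func hash64(x uint64, seed maphash.Seed) uint32 {
	var b [8]byte
	binary.LittleEndian.PutUint64(b[:], x)
	return uint32(maphash.Bytes(seed, b[:]))
}

func main() {
	seed := maphash.MakeSeed() // analogue of fastrandseed: fresh OS entropy each run
	for id := uint64(0); id < 4; id++ {
		oldState := 1597334677 * uint32(id) // old scheme: identical on every run
		newState := hash64(id, seed)        // new scheme: varies per run and per id
		fmt.Printf("m%d: old=%#x new=%#x\n", id, oldState, newState)
	}
}

The multiplicative scheme is a fixed function of the id, so sequential ids
yield the same first column on every run; hashing the id under a per-run
random seed does not.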

Change-Id: Ib22c530157d74200df0083f830e0408fd4aaea58
Reviewed-on: https://go-review.googlesource.com/c/go/+/203439
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Author: Carlo Alberto Ferraris
Date: 2019-10-26 16:26:59 +09:00
Committed-by: Keith Randall
Parent: 2de897f480
Commit: 97d0505334

15 changed files with 14 additions and 58 deletions


@@ -19,6 +19,5 @@ func checkgoarm() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -8,6 +8,5 @@ package runtime
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -44,6 +44,5 @@ func archauxv(tag, val uintptr) {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -151,6 +151,5 @@ func extractBits(data uint64, start, end uint) uint {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -131,7 +131,6 @@ func os_sigpipe() {
 
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -11,8 +11,6 @@ const (
 	_HWCAP_VFPv3 = 1 << 13 // introduced in 2.6.30
 )
 
-var randomNumber uint32
-
 func checkgoarm() {
 	// On Android, /proc/self/auxv might be unreadable and hwcap won't
 	// reflect the CPU capabilities. Assume that every Android arm device
@@ -34,13 +32,6 @@ func checkgoarm() {
 
 func archauxv(tag, val uintptr) {
 	switch tag {
-	case _AT_RANDOM:
-		// sysargs filled in startupRandomData, but that
-		// pointer may not be word aligned, so we must treat
-		// it as a byte array.
-		randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
-			uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
-
 	case _AT_HWCAP:
 		cpu.HWCap = uint(val)
 	case _AT_HWCAP2:
@@ -52,6 +43,5 @@ func archauxv(tag, val uintptr) {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand.
-	return nanotime() + int64(randomNumber)
+	return nanotime()
 }


@@ -8,17 +8,8 @@ package runtime
 
 import "internal/cpu"
 
-var randomNumber uint32
-
 func archauxv(tag, val uintptr) {
 	switch tag {
-	case _AT_RANDOM:
-		// sysargs filled in startupRandomData, but that
-		// pointer may not be word aligned, so we must treat
-		// it as a byte array.
-		randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
-			uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
-
 	case _AT_HWCAP:
 		// arm64 doesn't have a 'cpuid' instruction equivalent and relies on
 		// HWCAP/HWCAP2 bits for hardware capabilities.
@@ -40,6 +31,5 @@ func archauxv(tag, val uintptr) {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand.
-	return nanotime() + int64(randomNumber)
+	return nanotime()
 }


@@ -7,25 +7,14 @@
 
 package runtime
 
-var randomNumber uint32
-
 func archauxv(tag, val uintptr) {
-	switch tag {
-	case _AT_RANDOM:
-		// sysargs filled in startupRandomData, but that
-		// pointer may not be word aligned, so we must treat
-		// it as a byte array.
-		randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
-			uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
-	}
 }
 
 //go:nosplit
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand.
-	return nanotime() + int64(randomNumber)
+	return nanotime()
 }
 
 const (


@@ -7,25 +7,14 @@
 
 package runtime
 
-var randomNumber uint32
-
 func archauxv(tag, val uintptr) {
-	switch tag {
-	case _AT_RANDOM:
-		// sysargs filled in startupRandomData, but that
-		// pointer may not be word aligned, so we must treat
-		// it as a byte array.
-		randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
-			uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
-	}
 }
 
 //go:nosplit
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed fastrand().
 	// nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// randomNumber provides better seeding of fastrand1.
-	return nanotime() + int64(randomNumber)
+	return nanotime()
 }
 
 const (


@@ -30,6 +30,5 @@ func checkgoarm() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -19,6 +19,5 @@ func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -19,6 +19,5 @@ func checkgoarm() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -12,7 +12,6 @@ import (
 
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -12,6 +12,5 @@ func checkgoarm() {
 func cputicks() int64 {
 	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
 	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
-	// TODO: need more entropy to better seed fastrand.
 	return nanotime()
 }


@@ -543,6 +543,7 @@ func schedinit() {
 	moduledataverify()
 	stackinit()
 	mallocinit()
+	fastrandinit() // must run before mcommoninit
 	mcommoninit(_g_.m)
 	cpuinit() // must run before alginit
 	alginit() // maps must not be used before this call
@@ -620,8 +621,8 @@
 	sched.mnext++
 	checkmcount()
 
-	mp.fastrand[0] = 1597334677 * uint32(mp.id)
-	mp.fastrand[1] = uint32(cputicks())
+	mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
+	mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
 	if mp.fastrand[0]|mp.fastrand[1] == 0 {
 		mp.fastrand[1] = 1
 	}
@@ -646,6 +647,13 @@
 	}
 }
 
+var fastrandseed uintptr
+
+func fastrandinit() {
+	s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
+	getRandomData(s)
+}
+
 // Mark gp ready to run.
 func ready(gp *g, traceskip int, next bool) {
 	if trace.enabled {
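
For reference, a user-space analogue of the new fastrandinit above, with
crypto/rand standing in for the runtime's getRandomData (which reads
OS-provided entropy into the seed's bytes); this is a sketch, not the
runtime code:

package main

import (
	"crypto/rand"
	"fmt"
	"unsafe"
)

var fastrandseed uintptr

// fastrandinit mirrors the runtime version: view the uintptr as a
// byte slice and fill it with OS-provided random bytes.
func fastrandinit() {
	s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
	if _, err := rand.Read(s); err != nil {
		panic(err) // no entropy source available
	}
}

func main() {
	fastrandinit()
	fmt.Printf("fastrandseed = %#x\n", fastrandseed)
}

Note that mcommoninit hashes the two state words under fastrandseed and
^fastrandseed, so both halves of the per-M state derive different keys
from the same OS-provided seed.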