diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index f840ef4066..a1b5a03687 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -3569,13 +3569,17 @@ func init() {
 	alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
 	alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
 	alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
+	alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
 	alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
+	alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
 	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
 	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
 	alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
 	alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
 	alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
+	alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
 	alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
+	alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
 	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
 	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
 	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index fcd7728c7b..615b5ef769 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -1338,11 +1338,6 @@ func disallowInternal(srcDir string, importer *Package, importerPath string, p *
 		return p
 	}
 
-	// Allow sync package to access lightweight atomic functions limited to the runtime.
-	if p.Standard && strings.HasPrefix(importerPath, "sync") && p.ImportPath == "runtime/internal/atomic" {
-		return p
-	}
-
 	// Internal is present.
 	// Map import path back to directory corresponding to parent of internal.
 	if i > 0 {
diff --git a/src/sync/pool.go b/src/sync/pool.go
index 137413fdc4..1ae70127ac 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -7,7 +7,6 @@ package sync
 import (
 	"internal/race"
 	"runtime"
-	runtimeatomic "runtime/internal/atomic"
 	"sync/atomic"
 	"unsafe"
 )
@@ -153,8 +152,8 @@ func (p *Pool) Get() interface{} {
 
 func (p *Pool) getSlow(pid int) interface{} {
 	// See the comment in pin regarding ordering of the loads.
-	size := runtimeatomic.LoadAcquintptr(&p.localSize) // load-acquire
-	locals := p.local                                  // load-consume
+	size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+	locals := p.local                            // load-consume
 	// Try to steal one element from other procs.
 	for i := 0; i < int(size); i++ {
 		l := indexLocal(locals, (pid+i+1)%int(size))
@@ -166,7 +165,7 @@ func (p *Pool) getSlow(pid int) interface{} {
 	// Try the victim cache. We do this after attempting to steal
 	// from all primary caches because we want objects in the
 	// victim cache to age out if at all possible.
-	size = runtimeatomic.Loaduintptr(&p.victimSize)
+	size = atomic.LoadUintptr(&p.victimSize)
 	if uintptr(pid) >= size {
 		return nil
 	}
@@ -199,8 +198,8 @@ func (p *Pool) pin() (*poolLocal, int) {
 	// Since we've disabled preemption, GC cannot happen in between.
 	// Thus here we must observe local at least as large localSize.
 	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
-	s := runtimeatomic.LoadAcquintptr(&p.localSize) // load-acquire
-	l := p.local                                    // load-consume
+	s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+	l := p.local                              // load-consume
 	if uintptr(pid) < s {
 		return indexLocal(l, pid), pid
 	}
@@ -226,8 +225,8 @@ func (p *Pool) pinSlow() (*poolLocal, int) {
 	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
 	size := runtime.GOMAXPROCS(0)
 	local := make([]poolLocal, size)
-	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0]))   // store-release
-	runtimeatomic.StoreReluintptr(&p.localSize, uintptr(size)) // store-release
+	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
+	runtime_StoreReluintptr(&p.localSize, uintptr(size))     // store-release
 	return &local[pid], pid
 }
 
@@ -283,3 +282,13 @@ func indexLocal(l unsafe.Pointer, i int) *poolLocal {
 func runtime_registerPoolCleanup(cleanup func())
 func runtime_procPin() int
 func runtime_procUnpin()
+
+// The below are implemented in runtime/internal/atomic and the
+// compiler also knows to intrinsify the symbol we linkname into this
+// package.
+
+//go:linkname runtime_LoadAcquintptr runtime/internal/atomic.LoadAcquintptr
+func runtime_LoadAcquintptr(ptr *uintptr) uintptr
+
+//go:linkname runtime_StoreReluintptr runtime/internal/atomic.StoreReluintptr
+func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
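Note on the mechanism: the new sync declarations are //go:linkname "pulls", body-less Go declarations that the linker binds to symbols implemented in runtime/internal/atomic, and which the compiler can additionally intrinsify because of the alias entries added in ssa.go above. A minimal, purely illustrative sketch of the same pattern in a user package follows (not part of this change): runtime.nanotime is only an example target, importing unsafe is required for the directive to be honored, and a package that is not on cmd/go's short allow-list (sync is on it) also needs an empty .s file in the same directory so the compiler is run without -complete and accepts the missing body. Newer Go releases further restrict which symbols may be pulled this way.

// linkname_sketch.go (illustrative sketch only)
package main

import (
	_ "unsafe" // importing unsafe is required for //go:linkname to be honored
)

// Body-less declaration: the linker resolves this to runtime.nanotime.
//go:linkname nanotime runtime.nanotime
func nanotime() int64

func main() {
	// Prints a monotonic clock reading obtained through the linknamed function.
	println("nanotime:", nanotime())
}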