runtime: eliminate some unnecessary uintptr conversions
arena_{start,used,end} are already uintptr, so no need to convert them
to uintptr, much less to convert them to unsafe.Pointer and then to
uintptr. No binary change to pkg/linux_amd64/runtime.a.

Change-Id: Ia4232ed2a724c44fde7eba403c5fe8e6dccaa879
Reviewed-on: https://go-review.googlesource.com/16339
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
commit 4ff231bca1
parent 53d43cb556
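For context, a minimal sketch of why the dropped conversions were identities. It assumes, as the CL states, that the arena bounds are plain uintptr fields on mheap; the struct below is a toy stand-in, not the real runtime type:

package main

import "fmt"

// Toy stand-in for the runtime's mheap: the arena bounds are
// already uintptr, so subtracting them needs no conversion.
type mheap struct {
	arena_start uintptr
	arena_used  uintptr
	arena_end   uintptr
}

func main() {
	h := mheap{arena_start: 0x100000, arena_used: 0x180000, arena_end: 0x200000}
	// Old form: uintptr(h.arena_end)-uintptr(h.arena_used) — identity conversions.
	// New form: h.arena_end-h.arena_used — same value, same generated code.
	fmt.Printf("%#x\n", h.arena_end-h.arena_used) // 0x80000 bytes still reserved
}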
@@ -388,7 +388,7 @@ func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
 }
 
 func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
-	if n > uintptr(h.arena_end)-uintptr(h.arena_used) {
+	if n > h.arena_end-h.arena_used {
 		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
 		// Reserve some more space.
 		p_size := round(n+_PageSize, 256<<20)
@@ -420,7 +420,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 		}
 	}
 
-	if n <= uintptr(h.arena_end)-uintptr(h.arena_used) {
+	if n <= h.arena_end-h.arena_used {
 		// Keep taking from our reservation.
 		p := h.arena_used
 		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
@@ -438,7 +438,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 	}
 
 	// If using 64-bit, our reservation is all we have.
-	if uintptr(h.arena_end)-uintptr(h.arena_start) >= _MaxArena32 {
+	if h.arena_end-h.arena_start >= _MaxArena32 {
 		return nil
 	}
 
@@ -451,7 +451,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 		return nil
 	}
 
-	if p < h.arena_start || uintptr(p)+p_size-uintptr(h.arena_start) >= _MaxArena32 {
+	if p < h.arena_start || uintptr(p)+p_size-h.arena_start >= _MaxArena32 {
 		print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
 		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
 		return nil
@@ -459,7 +459,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 
 	p_end := p + p_size
 	p += -p & (_PageSize - 1)
-	if uintptr(p)+n > uintptr(h.arena_used) {
+	if uintptr(p)+n > h.arena_used {
 		mHeap_MapBits(h, p+n)
 		mHeap_MapSpans(h, p+n)
 		h.arena_used = p + n
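A side note on the context line p += -p & (_PageSize - 1) in the hunk above: it rounds p up to the next page boundary via the usual two's-complement trick. A small sketch, with the page size assumed to be 8 KB as in the runtime of this era:

package main

import "fmt"

func main() {
	const pageSize = 1 << 13 // assumed _PageSize (8 KB)
	p := uintptr(0x12345)
	p += -p & (pageSize - 1) // add just enough to reach a page boundary
	fmt.Printf("%#x\n", p)   // 0x14000
}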
@@ -384,8 +384,8 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
 	}
 
 	// find span
-	arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
-	arena_used := uintptr(unsafe.Pointer(mheap_.arena_used))
+	arena_start := mheap_.arena_start
+	arena_used := mheap_.arena_used
 	if uintptr(v) < arena_start || uintptr(v) >= arena_used {
 		return
 	}
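The two deleted lines above took an extra detour: uintptr to unsafe.Pointer and back to uintptr. That round trip is an identity (and the kind of pattern go vet flags as a possible misuse of unsafe.Pointer, since the value is not a real pointer); a quick demonstration:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	x := uintptr(0xc000001234)
	// The old code's detour: convert to unsafe.Pointer and straight back.
	y := uintptr(unsafe.Pointer(x)) // mimics the pattern the CL removes
	fmt.Println(x == y)             // true — only noise was removed
}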
@@ -581,7 +581,7 @@ HaveSpan:
 		mSpan_Init(t, s.start+pageID(npage), s.npages-npage)
 		s.npages = npage
 		p := uintptr(t.start)
-		p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+		p -= (h.arena_start >> _PageShift)
 		if p > 0 {
 			h_spans[p-1] = s
 		}
@@ -596,7 +596,7 @@ HaveSpan:
 	s.unusedsince = 0
 
 	p := uintptr(s.start)
-	p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+	p -= (h.arena_start >> _PageShift)
 	for n := uintptr(0); n < npage; n++ {
 		h_spans[p+n] = s
 	}
@@ -663,7 +663,7 @@ func mHeap_Grow(h *mheap, npage uintptr) bool {
 	s := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
 	mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
 	p := uintptr(s.start)
-	p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+	p -= (h.arena_start >> _PageShift)
 	for i := p; i < p+s.npages; i++ {
 		h_spans[i] = s
 	}
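The pattern shared by the last three hunks maps a span's first page to its slot in h_spans: s.start is already a page number (the address shifted right by _PageShift), so subtracting arena_start >> _PageShift yields a zero-based page index into the arena. A worked sketch with assumed values:

package main

import "fmt"

func main() {
	const pageShift = 13 // assumed _PageShift
	arenaStart := uintptr(0xc000000000)
	spanAddr := arenaStart + 5<<pageShift // span begins at the arena's 6th page

	p := spanAddr >> pageShift   // what a pageID like s.start holds
	p -= arenaStart >> pageShift // the CL's form: no conversions needed
	fmt.Println(p)               // 5 — index of the span's first h_spans slot
}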
@@ -679,7 +679,7 @@ func mHeap_Grow(h *mheap, npage uintptr) bool {
 // and is guaranteed to be start or end of span.
 func mHeap_Lookup(h *mheap, v unsafe.Pointer) *mspan {
 	p := uintptr(v)
-	p -= uintptr(unsafe.Pointer(h.arena_start))
+	p -= h.arena_start
 	return h_spans[p>>_PageShift]
 }
 
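Note that mHeap_Lookup subtracts the full byte address first and shifts afterwards, while the span-table writers shift first and then subtract. The two orders agree because arena_start is page-aligned; a sketch with assumed values:

package main

import "fmt"

func main() {
	const pageShift = 13
	base := uintptr(0xc000000000)  // page-aligned arena start (assumed)
	v := base + 3<<pageShift + 123 // interior pointer on the arena's 4th page

	a := (v - base) >> pageShift                // mHeap_Lookup's order
	b := (v >> pageShift) - (base >> pageShift) // the order used when filling h_spans
	fmt.Println(a, b, a == b)                   // 3 3 true
}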
@@ -691,12 +691,12 @@ func mHeap_Lookup(h *mheap, v unsafe.Pointer) *mspan {
 // other garbage in their middles, so we have to
 // check for that.
 func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan {
-	if uintptr(v) < uintptr(unsafe.Pointer(h.arena_start)) || uintptr(v) >= uintptr(unsafe.Pointer(h.arena_used)) {
+	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
 		return nil
 	}
 	p := uintptr(v) >> _PageShift
 	q := p
-	q -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
+	q -= h.arena_start >> _PageShift
 	s := h_spans[q]
 	if s == nil || p < uintptr(s.start) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
 		return nil
@@ -779,7 +779,7 @@ func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsi
 
 	// Coalesce with earlier, later spans.
 	p := uintptr(s.start)
-	p -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
+	p -= h.arena_start >> _PageShift
 	if p > 0 {
 		t := h_spans[p-1]
 		if t != nil && t.state == _MSpanFree {
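Finally, the index computed in this last hunk drives coalescing: h_spans[p-1] is the arena page just below the freed span, and if it belongs to a free span the two are merged. A toy sketch of that adjacency check (types and values invented for illustration):

package main

import "fmt"

// Toy stand-in for the runtime's span records.
type span struct{ free bool }

func main() {
	left := &span{free: true}
	cur := &span{}
	// h_spans analogue: one slot per arena page, naming the span that owns it.
	spans := []*span{left, left, cur, cur} // left owns pages 0-1, cur owns 2-3

	p := 2 // cur's first-page index, as computed in the hunk above
	if t := spans[p-1]; t != nil && t.free {
		fmt.Println("coalesce with the span ending at page", p-1) // printed
	}
}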