Mirror of https://github.com/golang/go (synced 2024-11-18 08:04:40 -07:00)
cmd/compile: share compiler allocations of similar shapes
Use the same allocator for, e.g., []int32 and []int8. Anything with a similar base shape can be coerced into a single allocator, which helps reuse memory more often.

There is not much unsafe in the compiler currently. This adds quite a bit, joining cmd/compile/internal/base/mapfile_mmap.go and some unsafe.Sizeof calls.

Change-Id: I95d6d6e47c42b9f0a45f3556f4d7605735e65d99
Reviewed-on: https://go-review.googlesource.com/c/go/+/461084
Reviewed-by: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Run-TryBot: Keith Randall <khr@golang.org>
This commit is contained in:
Parent: 87366feb12
Commit: 0e42632301
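The mechanism behind the sharing: a slice of a small element type is carved out of a pool-managed []int64 (or []*Value) by reinterpreting the backing array and scaling the length and capacity. Below is a minimal standalone sketch of that reinterpretation, using unsafe.Slice instead of the internal/unsafeheader.Slice header the generated code builds; viewAsInt32 is an illustrative helper, not part of this CL.

package main

import (
    "fmt"
    "unsafe"
)

// viewAsInt32 reinterprets the memory backing a []int64 as a []int32 with
// n elements. The caller must keep the base slice alive and must not use
// both views at the same time. Illustrative only.
func viewAsInt32(base []int64, n int) []int32 {
    const scale = int(unsafe.Sizeof(int64(0)) / unsafe.Sizeof(int32(0))) // 2
    if n > cap(base)*scale {
        panic("n too large for base allocation")
    }
    p := (*int32)(unsafe.Pointer(&base[0]))
    return unsafe.Slice(p, cap(base)*scale)[:n]
}

func main() {
    base := make([]int64, 4) // 32 bytes of backing store
    s := viewAsInt32(base, 6)
    fmt.Println(len(s), cap(s)) // 6 8
}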
--- a/src/cmd/compile/internal/ssa/_gen/allocators.go
+++ b/src/cmd/compile/internal/ssa/_gen/allocators.go
@@ -23,6 +23,12 @@ type allocator struct {
     maxLog int // log_2 of maximum allocation size
 }
 
+type derived struct {
+    name string // name for alloc/free functions
+    typ  string // the type they return/accept
+    base string // underlying allocator
+}
+
 func genAllocators() {
     allocators := []allocator{
         {
@@ -36,65 +42,15 @@ func genAllocators() {
             maxLog: 32,
         },
         {
-            name: "BlockSlice",
+            name: "Int64Slice",
-            typ: "[]*Block",
+            typ: "[]int64",
             capacity: "cap(%s)",
-            mak: "make([]*Block, %s)",
+            mak: "make([]int64, %s)",
-            resize: "%s[:%s]",
-            clear: "for i := range %[1]s {\n%[1]s[i] = nil\n}",
-            minLog: 5,
-            maxLog: 32,
-        },
-        {
-            name: "BoolSlice",
-            typ: "[]bool",
-            capacity: "cap(%s)",
-            mak: "make([]bool, %s)",
-            resize: "%s[:%s]",
-            clear: "for i := range %[1]s {\n%[1]s[i] = false\n}",
-            minLog: 8,
-            maxLog: 32,
-        },
-        {
-            name: "IntSlice",
-            typ: "[]int",
-            capacity: "cap(%s)",
-            mak: "make([]int, %s)",
             resize: "%s[:%s]",
             clear: "for i := range %[1]s {\n%[1]s[i] = 0\n}",
             minLog: 5,
             maxLog: 32,
         },
-        {
-            name: "Int32Slice",
-            typ: "[]int32",
-            capacity: "cap(%s)",
-            mak: "make([]int32, %s)",
-            resize: "%s[:%s]",
-            clear: "for i := range %[1]s {\n%[1]s[i] = 0\n}",
-            minLog: 6,
-            maxLog: 32,
-        },
-        {
-            name: "Int8Slice",
-            typ: "[]int8",
-            capacity: "cap(%s)",
-            mak: "make([]int8, %s)",
-            resize: "%s[:%s]",
-            clear: "for i := range %[1]s {\n%[1]s[i] = 0\n}",
-            minLog: 8,
-            maxLog: 32,
-        },
-        {
-            name: "IDSlice",
-            typ: "[]ID",
-            capacity: "cap(%s)",
-            mak: "make([]ID, %s)",
-            resize: "%s[:%s]",
-            clear: "for i := range %[1]s {\n%[1]s[i] = 0\n}",
-            minLog: 6,
-            maxLog: 32,
-        },
         {
             name: "SparseSet",
             typ: "*sparseSet",
@@ -126,6 +82,38 @@ func genAllocators() {
             maxLog: 32,
         },
     }
+    deriveds := []derived{
+        {
+            name: "BlockSlice",
+            typ:  "[]*Block",
+            base: "ValueSlice",
+        },
+        {
+            name: "IntSlice",
+            typ:  "[]int",
+            base: "Int64Slice",
+        },
+        {
+            name: "Int32Slice",
+            typ:  "[]int32",
+            base: "Int64Slice",
+        },
+        {
+            name: "Int8Slice",
+            typ:  "[]int8",
+            base: "Int64Slice",
+        },
+        {
+            name: "BoolSlice",
+            typ:  "[]bool",
+            base: "Int64Slice",
+        },
+        {
+            name: "IDSlice",
+            typ:  "[]ID",
+            base: "Int64Slice",
+        },
+    }
 
     w := new(bytes.Buffer)
     fmt.Fprintf(w, "// Code generated from _gen/allocators.go using 'go generate'; DO NOT EDIT.\n")
@@ -133,12 +121,22 @@ func genAllocators() {
     fmt.Fprintln(w, "package ssa")
 
     fmt.Fprintln(w, "import (")
+    fmt.Fprintln(w, "\"internal/unsafeheader\"")
     fmt.Fprintln(w, "\"math/bits\"")
     fmt.Fprintln(w, "\"sync\"")
+    fmt.Fprintln(w, "\"unsafe\"")
     fmt.Fprintln(w, ")")
     for _, a := range allocators {
         genAllocator(w, a)
     }
+    for _, d := range deriveds {
+        for _, base := range allocators {
+            if base.name == d.base {
+                genDerived(w, d, base)
+                break
+            }
+        }
+    }
     // gofmt result
     b := w.Bytes()
     var err error
@@ -196,3 +194,32 @@ func genAllocator(w io.Writer, a allocator) {
     }
     fmt.Fprintf(w, "}\n")
 }
+func genDerived(w io.Writer, d derived, base allocator) {
+    fmt.Fprintf(w, "func (c *Cache) alloc%s(n int) %s {\n", d.name, d.typ)
+    if d.typ[:2] != "[]" || base.typ[:2] != "[]" {
+        panic(fmt.Sprintf("bad derived types: %s %s", d.typ, base.typ))
+    }
+    fmt.Fprintf(w, "var base %s\n", base.typ[2:])
+    fmt.Fprintf(w, "var derived %s\n", d.typ[2:])
+    fmt.Fprintf(w, "if unsafe.Sizeof(base)%%unsafe.Sizeof(derived) != 0 { panic(\"bad\") }\n")
+    fmt.Fprintf(w, "scale := unsafe.Sizeof(base)/unsafe.Sizeof(derived)\n")
+    fmt.Fprintf(w, "b := c.alloc%s(int((uintptr(n)+scale-1)/scale))\n", base.name)
+    fmt.Fprintf(w, "s := unsafeheader.Slice {\n")
+    fmt.Fprintf(w, " Data: unsafe.Pointer(&b[0]),\n")
+    fmt.Fprintf(w, " Len: n,\n")
+    fmt.Fprintf(w, " Cap: cap(b)*int(scale),\n")
+    fmt.Fprintf(w, " }\n")
+    fmt.Fprintf(w, "return *(*%s)(unsafe.Pointer(&s))\n", d.typ)
+    fmt.Fprintf(w, "}\n")
+    fmt.Fprintf(w, "func (c *Cache) free%s(s %s) {\n", d.name, d.typ)
+    fmt.Fprintf(w, "var base %s\n", base.typ[2:])
+    fmt.Fprintf(w, "var derived %s\n", d.typ[2:])
+    fmt.Fprintf(w, "scale := unsafe.Sizeof(base)/unsafe.Sizeof(derived)\n")
+    fmt.Fprintf(w, "b := unsafeheader.Slice {\n")
+    fmt.Fprintf(w, " Data: unsafe.Pointer(&s[0]),\n")
+    fmt.Fprintf(w, " Len: int((uintptr(len(s))+scale-1)/scale),\n")
+    fmt.Fprintf(w, " Cap: int((uintptr(cap(s))+scale-1)/scale),\n")
+    fmt.Fprintf(w, " }\n")
+    fmt.Fprintf(w, "c.free%s(*(*%s)(unsafe.Pointer(&b)))\n", base.name, base.typ)
+    fmt.Fprintf(w, "}\n")
+}
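The generated alloc functions ask the base allocator for ceil(n/scale) elements; the expression genDerived emits, int((uintptr(n)+scale-1)/scale), is the usual ceiling division. A small self-contained sketch of that arithmetic, assuming an int64 base element and an int32 derived element (scale = 2); baseElems is an illustrative name, not from the CL:

package main

import "fmt"

// baseElems mirrors the expression genDerived emits:
// int((uintptr(n)+scale-1)/scale), i.e. ceiling division.
func baseElems(n, scale uintptr) int {
    return int((n + scale - 1) / scale)
}

func main() {
    // With an int64 base and an int32 derived type, scale is 2:
    // 5 int32s need 3 int64-sized slots (24 bytes for 20 bytes of data).
    fmt.Println(baseElems(5, 2)) // 3
}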
--- a/src/cmd/compile/internal/ssa/allocators.go
+++ b/src/cmd/compile/internal/ssa/allocators.go
@@ -3,8 +3,10 @@
 package ssa
 
 import (
+    "internal/unsafeheader"
     "math/bits"
     "sync"
+    "unsafe"
 )
 
 var poolFreeValueSlice [27]sync.Pool
@@ -45,232 +47,42 @@ func (c *Cache) freeValueSlice(s []*Value) {
     poolFreeValueSlice[b-5].Put(sp)
 }
 
-var poolFreeBlockSlice [27]sync.Pool
+var poolFreeInt64Slice [27]sync.Pool
 
-func (c *Cache) allocBlockSlice(n int) []*Block {
+func (c *Cache) allocInt64Slice(n int) []int64 {
-    var s []*Block
+    var s []int64
     n2 := n
     if n2 < 32 {
         n2 = 32
     }
     b := bits.Len(uint(n2 - 1))
-    v := poolFreeBlockSlice[b-5].Get()
+    v := poolFreeInt64Slice[b-5].Get()
     if v == nil {
-        s = make([]*Block, 1<<b)
+        s = make([]int64, 1<<b)
     } else {
-        sp := v.(*[]*Block)
+        sp := v.(*[]int64)
         s = *sp
         *sp = nil
-        c.hdrBlockSlice = append(c.hdrBlockSlice, sp)
+        c.hdrInt64Slice = append(c.hdrInt64Slice, sp)
     }
     s = s[:n]
     return s
 }
-func (c *Cache) freeBlockSlice(s []*Block) {
+func (c *Cache) freeInt64Slice(s []int64) {
-    for i := range s {
-        s[i] = nil
-    }
-    b := bits.Len(uint(cap(s)) - 1)
-    var sp *[]*Block
-    if len(c.hdrBlockSlice) == 0 {
-        sp = new([]*Block)
-    } else {
-        sp = c.hdrBlockSlice[len(c.hdrBlockSlice)-1]
-        c.hdrBlockSlice[len(c.hdrBlockSlice)-1] = nil
-        c.hdrBlockSlice = c.hdrBlockSlice[:len(c.hdrBlockSlice)-1]
-    }
-    *sp = s
-    poolFreeBlockSlice[b-5].Put(sp)
-}
-
-var poolFreeBoolSlice [24]sync.Pool
-
-func (c *Cache) allocBoolSlice(n int) []bool {
-    var s []bool
-    n2 := n
-    if n2 < 256 {
-        n2 = 256
-    }
-    b := bits.Len(uint(n2 - 1))
-    v := poolFreeBoolSlice[b-8].Get()
-    if v == nil {
-        s = make([]bool, 1<<b)
-    } else {
-        sp := v.(*[]bool)
-        s = *sp
-        *sp = nil
-        c.hdrBoolSlice = append(c.hdrBoolSlice, sp)
-    }
-    s = s[:n]
-    return s
-}
-func (c *Cache) freeBoolSlice(s []bool) {
-    for i := range s {
-        s[i] = false
-    }
-    b := bits.Len(uint(cap(s)) - 1)
-    var sp *[]bool
-    if len(c.hdrBoolSlice) == 0 {
-        sp = new([]bool)
-    } else {
-        sp = c.hdrBoolSlice[len(c.hdrBoolSlice)-1]
-        c.hdrBoolSlice[len(c.hdrBoolSlice)-1] = nil
-        c.hdrBoolSlice = c.hdrBoolSlice[:len(c.hdrBoolSlice)-1]
-    }
-    *sp = s
-    poolFreeBoolSlice[b-8].Put(sp)
-}
-
-var poolFreeIntSlice [27]sync.Pool
-
-func (c *Cache) allocIntSlice(n int) []int {
-    var s []int
-    n2 := n
-    if n2 < 32 {
-        n2 = 32
-    }
-    b := bits.Len(uint(n2 - 1))
-    v := poolFreeIntSlice[b-5].Get()
-    if v == nil {
-        s = make([]int, 1<<b)
-    } else {
-        sp := v.(*[]int)
-        s = *sp
-        *sp = nil
-        c.hdrIntSlice = append(c.hdrIntSlice, sp)
-    }
-    s = s[:n]
-    return s
-}
-func (c *Cache) freeIntSlice(s []int) {
     for i := range s {
         s[i] = 0
     }
     b := bits.Len(uint(cap(s)) - 1)
-    var sp *[]int
+    var sp *[]int64
-    if len(c.hdrIntSlice) == 0 {
+    if len(c.hdrInt64Slice) == 0 {
-        sp = new([]int)
+        sp = new([]int64)
     } else {
-        sp = c.hdrIntSlice[len(c.hdrIntSlice)-1]
+        sp = c.hdrInt64Slice[len(c.hdrInt64Slice)-1]
-        c.hdrIntSlice[len(c.hdrIntSlice)-1] = nil
+        c.hdrInt64Slice[len(c.hdrInt64Slice)-1] = nil
-        c.hdrIntSlice = c.hdrIntSlice[:len(c.hdrIntSlice)-1]
+        c.hdrInt64Slice = c.hdrInt64Slice[:len(c.hdrInt64Slice)-1]
     }
     *sp = s
-    poolFreeIntSlice[b-5].Put(sp)
+    poolFreeInt64Slice[b-5].Put(sp)
-}
-
-var poolFreeInt32Slice [26]sync.Pool
-
-func (c *Cache) allocInt32Slice(n int) []int32 {
-    var s []int32
-    n2 := n
-    if n2 < 64 {
-        n2 = 64
-    }
-    b := bits.Len(uint(n2 - 1))
-    v := poolFreeInt32Slice[b-6].Get()
-    if v == nil {
-        s = make([]int32, 1<<b)
-    } else {
-        sp := v.(*[]int32)
-        s = *sp
-        *sp = nil
-        c.hdrInt32Slice = append(c.hdrInt32Slice, sp)
-    }
-    s = s[:n]
-    return s
-}
-func (c *Cache) freeInt32Slice(s []int32) {
-    for i := range s {
-        s[i] = 0
-    }
-    b := bits.Len(uint(cap(s)) - 1)
-    var sp *[]int32
-    if len(c.hdrInt32Slice) == 0 {
-        sp = new([]int32)
-    } else {
-        sp = c.hdrInt32Slice[len(c.hdrInt32Slice)-1]
-        c.hdrInt32Slice[len(c.hdrInt32Slice)-1] = nil
-        c.hdrInt32Slice = c.hdrInt32Slice[:len(c.hdrInt32Slice)-1]
-    }
-    *sp = s
-    poolFreeInt32Slice[b-6].Put(sp)
-}
-
-var poolFreeInt8Slice [24]sync.Pool
-
-func (c *Cache) allocInt8Slice(n int) []int8 {
-    var s []int8
-    n2 := n
-    if n2 < 256 {
-        n2 = 256
-    }
-    b := bits.Len(uint(n2 - 1))
-    v := poolFreeInt8Slice[b-8].Get()
-    if v == nil {
-        s = make([]int8, 1<<b)
-    } else {
-        sp := v.(*[]int8)
-        s = *sp
-        *sp = nil
-        c.hdrInt8Slice = append(c.hdrInt8Slice, sp)
-    }
-    s = s[:n]
-    return s
-}
-func (c *Cache) freeInt8Slice(s []int8) {
-    for i := range s {
-        s[i] = 0
-    }
-    b := bits.Len(uint(cap(s)) - 1)
-    var sp *[]int8
-    if len(c.hdrInt8Slice) == 0 {
-        sp = new([]int8)
-    } else {
-        sp = c.hdrInt8Slice[len(c.hdrInt8Slice)-1]
-        c.hdrInt8Slice[len(c.hdrInt8Slice)-1] = nil
-        c.hdrInt8Slice = c.hdrInt8Slice[:len(c.hdrInt8Slice)-1]
-    }
-    *sp = s
-    poolFreeInt8Slice[b-8].Put(sp)
-}
-
-var poolFreeIDSlice [26]sync.Pool
-
-func (c *Cache) allocIDSlice(n int) []ID {
-    var s []ID
-    n2 := n
-    if n2 < 64 {
-        n2 = 64
-    }
-    b := bits.Len(uint(n2 - 1))
-    v := poolFreeIDSlice[b-6].Get()
-    if v == nil {
-        s = make([]ID, 1<<b)
-    } else {
-        sp := v.(*[]ID)
-        s = *sp
-        *sp = nil
-        c.hdrIDSlice = append(c.hdrIDSlice, sp)
-    }
-    s = s[:n]
-    return s
-}
-func (c *Cache) freeIDSlice(s []ID) {
-    for i := range s {
-        s[i] = 0
-    }
-    b := bits.Len(uint(cap(s)) - 1)
-    var sp *[]ID
-    if len(c.hdrIDSlice) == 0 {
-        sp = new([]ID)
-    } else {
-        sp = c.hdrIDSlice[len(c.hdrIDSlice)-1]
-        c.hdrIDSlice[len(c.hdrIDSlice)-1] = nil
-        c.hdrIDSlice = c.hdrIDSlice[:len(c.hdrIDSlice)-1]
-    }
-    *sp = s
-    poolFreeIDSlice[b-6].Put(sp)
 }
 
 var poolFreeSparseSet [27]sync.Pool
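For reference, the shared pool above hands out power-of-two size classes: a request of n elements is rounded up to at least 32 and then to the next power of two, and bits.Len picks the pool bucket. A small self-contained sketch of that bucketing, mirroring the generated allocInt64Slice; sizeClass is an illustrative name, not part of the generated code:

package main

import (
    "fmt"
    "math/bits"
)

// sizeClass mirrors the bucketing in the generated allocInt64Slice:
// requests are rounded up to at least 32 elements, then to the next
// power of two; the pool index is b-5 (2^5 == 32 is the smallest class).
func sizeClass(n int) (bucket, capacity int) {
    n2 := n
    if n2 < 32 {
        n2 = 32
    }
    b := bits.Len(uint(n2 - 1))
    return b - 5, 1 << b
}

func main() {
    for _, n := range []int{1, 32, 33, 100} {
        bucket, c := sizeClass(n)
        fmt.Printf("n=%d -> pool index %d, slice cap %d\n", n, bucket, c)
    }
}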
@@ -341,3 +153,159 @@ func (c *Cache) freeSparseMapPos(s *sparseMapPos) {
     b := bits.Len(uint(s.cap()) - 1)
     poolFreeSparseMapPos[b-5].Put(s)
 }
+func (c *Cache) allocBlockSlice(n int) []*Block {
+    var base *Value
+    var derived *Block
+    if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+        panic("bad")
+    }
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := c.allocValueSlice(int((uintptr(n) + scale - 1) / scale))
+    s := unsafeheader.Slice{
+        Data: unsafe.Pointer(&b[0]),
+        Len:  n,
+        Cap:  cap(b) * int(scale),
+    }
+    return *(*[]*Block)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeBlockSlice(s []*Block) {
+    var base *Value
+    var derived *Block
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := unsafeheader.Slice{
+        Data: unsafe.Pointer(&s[0]),
+        Len:  int((uintptr(len(s)) + scale - 1) / scale),
+        Cap:  int((uintptr(cap(s)) + scale - 1) / scale),
+    }
+    c.freeValueSlice(*(*[]*Value)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocIntSlice(n int) []int {
+    var base int64
+    var derived int
+    if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+        panic("bad")
+    }
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+    s := unsafeheader.Slice{
+        Data: unsafe.Pointer(&b[0]),
+        Len:  n,
+        Cap:  cap(b) * int(scale),
+    }
+    return *(*[]int)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeIntSlice(s []int) {
+    var base int64
+    var derived int
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := unsafeheader.Slice{
+        Data: unsafe.Pointer(&s[0]),
+        Len:  int((uintptr(len(s)) + scale - 1) / scale),
+        Cap:  int((uintptr(cap(s)) + scale - 1) / scale),
+    }
+    c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocInt32Slice(n int) []int32 {
+    var base int64
+    var derived int32
+    if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+        panic("bad")
+    }
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+    s := unsafeheader.Slice{
+        Data: unsafe.Pointer(&b[0]),
+        Len:  n,
+        Cap:  cap(b) * int(scale),
+    }
+    return *(*[]int32)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeInt32Slice(s []int32) {
+    var base int64
+    var derived int32
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := unsafeheader.Slice{
+        Data: unsafe.Pointer(&s[0]),
+        Len:  int((uintptr(len(s)) + scale - 1) / scale),
+        Cap:  int((uintptr(cap(s)) + scale - 1) / scale),
+    }
+    c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocInt8Slice(n int) []int8 {
+    var base int64
+    var derived int8
+    if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+        panic("bad")
+    }
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+    s := unsafeheader.Slice{
+        Data: unsafe.Pointer(&b[0]),
+        Len:  n,
+        Cap:  cap(b) * int(scale),
+    }
+    return *(*[]int8)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeInt8Slice(s []int8) {
+    var base int64
+    var derived int8
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := unsafeheader.Slice{
+        Data: unsafe.Pointer(&s[0]),
+        Len:  int((uintptr(len(s)) + scale - 1) / scale),
+        Cap:  int((uintptr(cap(s)) + scale - 1) / scale),
+    }
+    c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocBoolSlice(n int) []bool {
+    var base int64
+    var derived bool
+    if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+        panic("bad")
+    }
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+    s := unsafeheader.Slice{
+        Data: unsafe.Pointer(&b[0]),
+        Len:  n,
+        Cap:  cap(b) * int(scale),
+    }
+    return *(*[]bool)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeBoolSlice(s []bool) {
+    var base int64
+    var derived bool
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := unsafeheader.Slice{
+        Data: unsafe.Pointer(&s[0]),
+        Len:  int((uintptr(len(s)) + scale - 1) / scale),
+        Cap:  int((uintptr(cap(s)) + scale - 1) / scale),
+    }
+    c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocIDSlice(n int) []ID {
+    var base int64
+    var derived ID
+    if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+        panic("bad")
+    }
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+    s := unsafeheader.Slice{
+        Data: unsafe.Pointer(&b[0]),
+        Len:  n,
+        Cap:  cap(b) * int(scale),
+    }
+    return *(*[]ID)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeIDSlice(s []ID) {
+    var base int64
+    var derived ID
+    scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+    b := unsafeheader.Slice{
+        Data: unsafe.Pointer(&s[0]),
+        Len:  int((uintptr(len(s)) + scale - 1) / scale),
+        Cap:  int((uintptr(cap(s)) + scale - 1) / scale),
+    }
+    c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
|
@ -34,12 +34,7 @@ type Cache struct {
|
|||||||
// Free "headers" for use by the allocators in allocators.go.
|
// Free "headers" for use by the allocators in allocators.go.
|
||||||
// Used to put slices in sync.Pools without allocation.
|
// Used to put slices in sync.Pools without allocation.
|
||||||
hdrValueSlice []*[]*Value
|
hdrValueSlice []*[]*Value
|
||||||
hdrBlockSlice []*[]*Block
|
hdrInt64Slice []*[]int64
|
||||||
hdrBoolSlice []*[]bool
|
|
||||||
hdrIntSlice []*[]int
|
|
||||||
hdrInt32Slice []*[]int32
|
|
||||||
hdrInt8Slice []*[]int8
|
|
||||||
hdrIDSlice []*[]ID
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Cache) Reset() {
|
func (c *Cache) Reset() {
|
||||||
|
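A property the shared int64 pool depends on, and which the generated unsafe.Sizeof check enforces, is that every derived element size evenly divides the 8-byte base element. A small self-contained check of those size relationships; the list of types is illustrative and simply mirrors the derived slice types in this CL:

package main

import (
    "fmt"
    "unsafe"
)

func main() {
    // Each derived element size must evenly divide the 8-byte int64 base
    // element, mirroring the check genDerived emits into the generated code.
    sizes := map[string]uintptr{
        "int":   unsafe.Sizeof(int(0)),
        "int32": unsafe.Sizeof(int32(0)),
        "int8":  unsafe.Sizeof(int8(0)),
        "bool":  unsafe.Sizeof(false),
    }
    base := unsafe.Sizeof(int64(0))
    for name, s := range sizes {
        fmt.Printf("%s: size %d, divides int64: %v\n", name, s, base%s == 0)
    }
}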