diff --git a/src/maps/maps.go b/src/maps/maps.go
index 25a0142eae..27eea01501 100644
--- a/src/maps/maps.go
+++ b/src/maps/maps.go
@@ -53,6 +53,9 @@ func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M
 	return true
 }
 
+// clone is implemented in the runtime package.
+func clone(m any) any
+
 // Clone returns a copy of m. This is a shallow clone:
 // the new keys and values are set using ordinary assignment.
 func Clone[M ~map[K]V, K comparable, V any](m M) M {
@@ -60,11 +63,7 @@ func Clone[M ~map[K]V, K comparable, V any](m M) M {
 	if m == nil {
 		return nil
 	}
-	r := make(M, len(m))
-	for k, v := range m {
-		r[k] = v
-	}
-	return r
+	return clone(m).(M)
 }
 
 // Copy copies all key/value pairs in src adding them to dst.
diff --git a/src/maps/maps.s b/src/maps/maps.s
new file mode 100644
index 0000000000..4e5577892d
--- /dev/null
+++ b/src/maps/maps.s
@@ -0,0 +1,5 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// need this empty asm file to enable linkname.
\ No newline at end of file
diff --git a/src/maps/maps_test.go b/src/maps/maps_test.go
index a7e03ad9b0..144f5375c9 100644
--- a/src/maps/maps_test.go
+++ b/src/maps/maps_test.go
@@ -179,3 +179,52 @@ func TestDeleteFunc(t *testing.T) {
 		t.Errorf("DeleteFunc result = %v, want %v", mc, want)
 	}
 }
+
+var n map[int]int
+
+func BenchmarkMapClone(b *testing.B) {
+	var m = make(map[int]int)
+	for i := 0; i < 1000000; i++ {
+		m[i] = i
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		n = Clone(m)
+	}
+}
+
+func TestCloneWithDelete(t *testing.T) {
+	var m = make(map[int]int)
+	for i := 0; i < 32; i++ {
+		m[i] = i
+	}
+	for i := 8; i < 32; i++ {
+		delete(m, i)
+	}
+	m2 := Clone(m)
+	if len(m2) != 8 {
+		t.Errorf("len(m2) = %d, want %d", len(m2), 8)
+	}
+	for i := 0; i < 8; i++ {
+		if m2[i] != m[i] {
+			t.Errorf("m2[%d] = %d, want %d", i, m2[i], m[i])
+		}
+	}
+}
+
+func TestCloneWithMapAssign(t *testing.T) {
+	var m = make(map[int]int)
+	const N = 25
+	for i := 0; i < N; i++ {
+		m[i] = i
+	}
+	m2 := Clone(m)
+	if len(m2) != N {
+		t.Errorf("len(m2) = %d, want %d", len(m2), N)
+	}
+	for i := 0; i < N; i++ {
+		if m2[i] != m[i] {
+			t.Errorf("m2[%d] = %d, want %d", i, m2[i], m[i])
+		}
+	}
+}
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 9c3a7e2b8c..e98860fe7a 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -1445,3 +1445,151 @@ var zeroVal [maxZero]byte
 // map init function to this symbol. Defined in assembly so as to avoid
 // complications with instrumentation (coverage, etc).
 func mapinitnoop()
+
+// mapclone for implementing maps.Clone
+//
+//go:linkname mapclone maps.clone
+func mapclone(m any) any {
+	e := efaceOf(&m)
+	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
+	return m
+}
+
+// moveToBmap moves a bucket from src to dst. It returns the destination bucket or a new destination bucket if it overflows,
+// and the position at which the next key/value will be written; pos == bucketCnt means the next write needs to go to an overflow bucket.
+func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
+	for i := 0; i < bucketCnt; i++ {
+		if isEmpty(src.tophash[i]) {
+			continue
+		}
+
+		for ; pos < bucketCnt; pos++ {
+			if isEmpty(dst.tophash[pos]) {
+				break
+			}
+		}
+
+		if pos == bucketCnt {
+			dst = h.newoverflow(t, dst)
+			pos = 0
+		}
+
+		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.keysize))
+		srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(i)*uintptr(t.elemsize))
+		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.keysize))
+		dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(pos)*uintptr(t.elemsize))
+
+		dst.tophash[pos] = src.tophash[i]
+		if t.indirectkey() {
+			*(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK)
+		} else {
+			typedmemmove(t.key, dstK, srcK)
+		}
+		if t.indirectelem() {
+			*(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle)
+		} else {
+			typedmemmove(t.elem, dstEle, srcEle)
+		}
+		pos++
+		h.count++
+	}
+	return dst, pos
+}
+
+func mapclone2(t *maptype, src *hmap) *hmap {
+	dst := makemap(t, src.count, nil)
+	dst.hash0 = src.hash0
+	dst.nevacuate = 0
+	// flags do not need to be copied here, just as a new map has no flags.
+
+	if src.count == 0 {
+		return dst
+	}
+
+	if src.flags&hashWriting != 0 {
+		fatal("concurrent map clone and map write")
+	}
+
+	if src.B == 0 {
+		dst.buckets = newobject(t.bucket)
+		dst.count = src.count
+		typedmemmove(t.bucket, dst.buckets, src.buckets)
+		return dst
+	}
+
+	// src.B != 0
+	if dst.B == 0 {
+		dst.buckets = newobject(t.bucket)
+	}
+	dstArraySize := int(bucketShift(dst.B))
+	srcArraySize := int(bucketShift(src.B))
+	for i := 0; i < dstArraySize; i++ {
+		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.bucketsize))))
+		pos := 0
+		for j := 0; j < srcArraySize; j += dstArraySize {
+			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.bucketsize))))
+			for srcBmap != nil {
+				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
+				srcBmap = srcBmap.overflow(t)
+			}
+		}
+	}
+
+	if src.oldbuckets == nil {
+		return dst
+	}
+
+	oldB := src.B
+	srcOldbuckets := src.oldbuckets
+	if !src.sameSizeGrow() {
+		oldB--
+	}
+	oldSrcArraySize := int(bucketShift(oldB))
+
+	for i := 0; i < oldSrcArraySize; i++ {
+		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.bucketsize))))
+		if evacuated(srcBmap) {
+			continue
+		}
+
+		if oldB >= dst.B { // main bucket bits in dst are less than oldB bits in src
+			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.bucketsize)))
+			for dstBmap.overflow(t) != nil {
+				dstBmap = dstBmap.overflow(t)
+			}
+			pos := 0
+			for srcBmap != nil {
+				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
+				srcBmap = srcBmap.overflow(t)
+			}
+			continue
+		}
+
+		for srcBmap != nil {
+			// move from old bucket to new bucket
+			for i := uintptr(0); i < bucketCnt; i++ {
+				if isEmpty(srcBmap.tophash[i]) {
+					continue
+				}
+
+				if src.flags&hashWriting != 0 {
+					fatal("concurrent map clone and map write")
+				}
+
+				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.keysize))
+				if t.indirectkey() {
+					srcK = *((*unsafe.Pointer)(srcK))
+				}
+
+				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+				if t.indirectelem() {
+					srcEle = *((*unsafe.Pointer)(srcEle))
+				}
+				dstEle := mapassign(t, dst, srcK)
+				typedmemmove(t.elem, dstEle, srcEle)
+			}
+			srcBmap = srcBmap.overflow(t)
+		}
+	}
+	return dst
+}
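
For context, a minimal usage sketch (not part of the patch) of the exported maps.Clone whose implementation this change redirects into the runtime. The documented behavior is unchanged by the patch: the clone is shallow, so top-level entries are copied by ordinary assignment and reference-typed values (slices, maps, pointers) stay shared between the original and the clone. The example values below are purely illustrative.

	package main

	import (
		"fmt"
		"maps"
	)

	func main() {
		orig := map[string][]int{"a": {1, 2}}
		c := maps.Clone(orig)

		c["b"] = []int{3} // new top-level entry: visible only in the clone
		c["a"][0] = 99    // shared backing slice: the write is visible in both maps

		fmt.Println(orig) // map[a:[99 2]]
		fmt.Println(c)    // map[a:[99 2] b:[3]]
	}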