diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go
index 73988b6a2bb..5adc2b87130 100644
--- a/src/internal/abi/type.go
+++ b/src/internal/abi/type.go
@@ -242,9 +242,9 @@ type ArrayType struct {
 }
 
 // Len returns the length of t if t is an array type, otherwise 0
-func (t *Type) Len() uintptr {
+func (t *Type) Len() int {
 	if t.Kind() == Array {
-		return (*ArrayType)(unsafe.Pointer(t)).Len
+		return int((*ArrayType)(unsafe.Pointer(t)).Len)
 	}
 	return 0
 }
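The Len change above converts once at the accessor boundary so callers, including reflect's public Type.Len (which returns int), no longer write the conversion themselves. A standalone sketch of the effect, with stand-in types rather than code from this patch:

package main

import "fmt"

// arrayType stands in for abi.ArrayType; the descriptor still stores
// the element count as a uintptr.
type arrayType struct {
	len uintptr
}

// Len converts once here, so every caller sees a plain int.
func (t *arrayType) Len() int { return int(t.len) }

func main() {
	t := &arrayType{len: 8}
	fmt.Println(t.Len()) // 8, already an int
}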
diff --git a/src/reflect/abi.go b/src/reflect/abi.go
index 8ae8964bfe4..2b5f4053805 100644
--- a/src/reflect/abi.go
+++ b/src/reflect/abi.go
@@ -121,7 +121,7 @@ func (a *abiSeq) stepsForValue(i int) []abiStep {
 //
 // If the value was stack-assigned, returns the single
 // abiStep describing that translation, and nil otherwise.
-func (a *abiSeq) addArg(t *rtype) *abiStep {
+func (a *abiSeq) addArg(t *abi.Type) *abiStep {
 	// We'll always be adding a new value, so do that first.
 	pStart := len(a.steps)
 	a.valueStart = append(a.valueStart, pStart)
@@ -162,11 +162,11 @@ func (a *abiSeq) addArg(t *rtype) *abiStep {
 // If the receiver was stack-assigned, returns the single
 // abiStep describing that translation, and nil otherwise.
 // Returns true if the receiver is a pointer.
-func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
+func (a *abiSeq) addRcvr(rcvr *abi.Type) (*abiStep, bool) {
 	// The receiver is always one word.
 	a.valueStart = append(a.valueStart, len(a.steps))
 	var ok, ptr bool
-	if ifaceIndir(rcvr) || rcvr.pointers() {
+	if ifaceIndir(rcvr) || rcvr.Pointers() {
 		ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
 		ptr = true
 	} else {
@@ -195,8 +195,8 @@ func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
 //
 // This method along with the assign* methods represent the
 // complete register-assignment algorithm for the Go ABI.
-func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
-	switch t.Kind() {
+func (a *abiSeq) regAssign(t *abi.Type, offset uintptr) bool {
+	switch Kind(t.Kind()) {
 	case UnsafePointer, Pointer, Chan, Map, Func:
 		return a.assignIntN(offset, t.Size(), 1, 0b1)
 	case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
@@ -229,7 +229,7 @@ func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
 		// try to stack-assign this value.
 		return true
 	case 1:
-		return a.regAssign(toRType(tt.Elem), offset)
+		return a.regAssign(tt.Elem, offset)
 	default:
 		return false
 	}
@@ -384,7 +384,7 @@ func dumpPtrBitMap(b abi.IntArgRegBitmap) {
 	}
 }
 
-func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
+func newAbiDesc(t *funcType, rcvr *abi.Type) abiDesc {
 	// We need to add space for this argument to
 	// the frame so that it can spill args into it.
 	//
@@ -417,9 +417,9 @@ func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
 		}
 	}
 	for i, arg := range t.InSlice() {
-		stkStep := in.addArg(toRType(arg))
+		stkStep := in.addArg(arg)
 		if stkStep != nil {
-			addTypeBits(stackPtrs, stkStep.stkOff, toRType(arg))
+			addTypeBits(stackPtrs, stkStep.stkOff, arg)
 		} else {
 			spill = align(spill, uintptr(arg.Align()))
 			spill += arg.Size()
@@ -450,9 +450,9 @@ func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
 	// the return offset.
 	out.stackBytes = retOffset
 	for i, res := range t.OutSlice() {
-		stkStep := out.addArg(toRType(res))
+		stkStep := out.addArg(res)
 		if stkStep != nil {
-			addTypeBits(stackPtrs, stkStep.stkOff, toRType(res))
+			addTypeBits(stackPtrs, stkStep.stkOff, res)
 		} else {
 			for _, st := range out.stepsForValue(i) {
 				if st.kind == abiStepPointer {
diff --git a/src/reflect/deepequal.go b/src/reflect/deepequal.go
index b361f142d95..0c78dbb8c85 100644
--- a/src/reflect/deepequal.go
+++ b/src/reflect/deepequal.go
@@ -39,7 +39,7 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool {
 	hard := func(v1, v2 Value) bool {
 		switch v1.Kind() {
 		case Pointer:
-			if v1.typ.t.PtrBytes == 0 {
+			if v1.typ.PtrBytes == 0 {
 				// not-in-heap pointers can't be cyclic.
 				// At least, all of our current uses of runtime/internal/sys.NotInHeap
 				// have that property. The runtime ones aren't cyclic (and we don't use
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
index 602cf33b313..2496c8dcd9b 100644
--- a/src/reflect/export_test.go
+++ b/src/reflect/export_test.go
@@ -5,6 +5,7 @@
 package reflect
 
 import (
+	"internal/abi"
 	"internal/goarch"
 	"sync"
 	"unsafe"
@@ -29,17 +30,17 @@ var CallGC = &callGC
 // takes up one byte, so that writing out test cases is a little clearer.
 // If ptrs is false, gc will be nil.
 func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack, gc, inReg, outReg []byte, ptrs bool) {
-	var ft *rtype
+	var ft *abi.Type
 	var abid abiDesc
 	if rcvr != nil {
-		ft, _, abid = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype))
+		ft, _, abid = funcLayout((*funcType)(unsafe.Pointer(t.common())), rcvr.common())
 	} else {
 		ft, _, abid = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil)
 	}
 	// Extract size information.
 	argSize = abid.stackCallArgsSize
 	retOffset = abid.retOffset
-	frametype = ft
+	frametype = toType(ft)
 
 	// Expand stack pointer bitmap into byte-map.
 	for i := uint32(0); i < abid.stackPtrs.n; i++ {
@@ -57,15 +58,15 @@ func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr,
 		inReg = append(inReg, bool2byte(abid.inRegPtrs.Get(i)))
 		outReg = append(outReg, bool2byte(abid.outRegPtrs.Get(i)))
 	}
-	if ft.t.Kind_&kindGCProg != 0 {
+	if ft.Kind_&kindGCProg != 0 {
 		panic("can't handle gc programs")
 	}
 
 	// Expand frame type's GC bitmap into byte-map.
-	ptrs = ft.t.PtrBytes != 0
+	ptrs = ft.PtrBytes != 0
 	if ptrs {
-		nptrs := ft.t.PtrBytes / goarch.PtrSize
-		gcdata := ft.gcSlice(0, (nptrs+7)/8)
+		nptrs := ft.PtrBytes / goarch.PtrSize
+		gcdata := ft.GcSlice(0, (nptrs+7)/8)
 		for i := uintptr(0); i < nptrs; i++ {
 			gc = append(gc, gcdata[i/8]>>(i%8)&1)
 		}
@@ -91,7 +92,7 @@ var GCBits = gcbits
 func gcbits(any) []byte // provided by runtime
 
 func MapBucketOf(x, y Type) Type {
-	return bucketOf(x.(*rtype), y.(*rtype))
+	return toType(bucketOf(x.common(), y.common()))
 }
 
 func CachedBucketOf(m Type) Type {
@@ -100,7 +101,7 @@ func CachedBucketOf(m Type) Type {
 		panic("not map")
 	}
 	tt := (*mapType)(unsafe.Pointer(t))
-	return tt.Bucket
+	return toType(tt.Bucket)
 }
 
 type EmbedWithUnexpMeth struct{}
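The frametype = toType(ft) and bucketOf conversions above lean on one invariant: rtype's only field is an abi.Type, so *abi.Type and *rtype are layout-compatible and convert through unsafe.Pointer. A minimal sketch of that pattern with stand-in types (illustrative only, not the package's actual declarations):

package main

import (
	"fmt"
	"unsafe"
)

type abiType struct{ size uintptr } // stand-in for internal/abi.Type

// rtype wraps abiType as its sole field, exactly so the two pointer
// types can be reinterpreted in either direction.
type rtype struct{ t abiType }

func toRType(t *abiType) *rtype { return (*rtype)(unsafe.Pointer(t)) }

func main() {
	at := &abiType{size: 16}
	rt := toRType(at)
	// Same memory, different static view of it.
	fmt.Println(rt.t.size == 16, unsafe.Pointer(rt) == unsafe.Pointer(at)) // true true
}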
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
index 6e8aeafabe8..6f9be089177 100644
--- a/src/reflect/makefunc.go
+++ b/src/reflect/makefunc.go
@@ -126,7 +126,7 @@ func makeMethodValue(op string, v Value) Value {
 	// but we want Interface() and other operations to fail early.
 	methodReceiver(op, fv.rcvr, fv.method)
 
-	return Value{toRType(&ftyp.Type), unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
+	return Value{ftyp.Common(), unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
 }
 
 func methodValueCallCodePtr() uintptr {
diff --git a/src/reflect/swapper.go b/src/reflect/swapper.go
index 25cd6ef6dab..1e8f4ed1636 100644
--- a/src/reflect/swapper.go
+++ b/src/reflect/swapper.go
@@ -5,6 +5,7 @@
 package reflect
 
 import (
+	"internal/abi"
 	"internal/goarch"
 	"internal/unsafeheader"
 	"unsafe"
@@ -31,9 +32,9 @@ func Swapper(slice any) func(i, j int) {
 		}
 	}
 
-	typ := v.Type().Elem().(*rtype)
+	typ := v.Type().Elem().common()
 	size := typ.Size()
-	hasPtr := typ.t.PtrBytes != 0
+	hasPtr := typ.PtrBytes != 0
 
 	// Some common & small cases, without using memmove:
 	if hasPtr {
@@ -41,7 +42,7 @@ func Swapper(slice any) func(i, j int) {
 			ps := *(*[]unsafe.Pointer)(v.ptr)
 			return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] }
 		}
-		if typ.Kind() == String {
+		if typ.Kind() == abi.String {
 			ss := *(*[]string)(v.ptr)
 			return func(i, j int) { ss[i], ss[j] = ss[j], ss[i] }
 		}
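With common() now yielding *abi.Type, kind checks either compare against abi's constants (typ.Kind() == abi.String above) or convert once, as regAssign's switch Kind(t.Kind()) does earlier in this patch. That works because the two kind enumerations share ordinals. A sketch with assumed values shown for one kind:

package main

import "fmt"

type abiKind uint8     // stand-in for abi.Kind
type reflectKind uint  // stand-in for reflect.Kind

const (
	abiString     abiKind     = 24 // assumption: String has the same ordinal in both spaces
	reflectString reflectKind = 24
)

func main() {
	var k abiKind = abiString
	// Crossing kind spaces is a pure integer conversion.
	fmt.Println(reflectKind(k) == reflectString) // true
}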
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 618204780a6..b027077aff1 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -218,7 +218,7 @@ type Type interface {
 	// It panics if i is not in the range [0, NumOut()).
 	Out(i int) Type
 
-	common() *rtype
+	common() *abi.Type
 	uncommon() *uncommonType
 }
 
@@ -273,21 +273,34 @@ const (
 // Ptr is the old name for the Pointer kind.
 const Ptr = Pointer
 
+// uncommonType is present only for defined types or types with methods
+// (if T is a defined type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe a non-defined type with no methods.
+type uncommonType = abi.UncommonType
+
+// Embed this type to get common/uncommon
+type common struct {
+	abi.Type
+}
+
 // rtype is the common implementation of most values.
 // It is embedded in other struct types.
 type rtype struct {
 	t abi.Type
 }
 
-type nameOff = abi.NameOff
-type typeOff = abi.TypeOff
-type textOff = abi.TextOff
+func (t *rtype) common() *abi.Type {
+	return &t.t
+}
 
-// uncommonType is present only for defined types or types with methods
-// (if T is a defined type, the uncommonTypes for T and *T have methods).
-// Using a pointer to this struct reduces the overall size required
-// to describe a non-defined type with no methods.
-type uncommonType = abi.UncommonType
+func (t *rtype) uncommon() *abi.UncommonType {
+	return t.t.Uncommon()
+}
+
+type aNameOff = abi.NameOff
+type aTypeOff = abi.TypeOff
+type aTextOff = abi.TextOff
 
 // ChanDir represents a channel type's direction.
 type ChanDir int
@@ -319,53 +332,54 @@ type funcType = abi.FuncType
 
 // interfaceType represents an interface type.
 type interfaceType struct {
-	rtype
-	PkgPath abi.Name      // import path
-	Methods []abi.Imethod // sorted by hash
+	abi.InterfaceType // can embed directly because not a public type.
+}
+
+func (t *interfaceType) nameOff(off aNameOff) abi.Name {
+	return toRType(&t.Type).nameOff(off)
+}
+
+func nameOffFor(t *abi.Type, off aNameOff) abi.Name {
+	return toRType(t).nameOff(off)
+}
+
+func typeOffFor(t *abi.Type, off aTypeOff) *abi.Type {
+	return toRType(t).typeOff(off)
+}
+
+func (t *interfaceType) typeOff(off aTypeOff) *abi.Type {
+	return toRType(&t.Type).typeOff(off)
+}
+
+func (t *interfaceType) common() *abi.Type {
+	return &t.Type
+}
+
+func (t *interfaceType) uncommon() *abi.UncommonType {
+	return t.Uncommon()
 }
 
 // mapType represents a map type.
 type mapType struct {
-	rtype
-	Key    *rtype // map key type
-	Elem   *rtype // map element (value) type
-	Bucket *rtype // internal bucket structure
-	// function for hashing keys (ptr to key, seed) -> hash
-	Hasher     func(unsafe.Pointer, uintptr) uintptr
-	Keysize    uint8  // size of key slot
-	Valuesize  uint8  // size of value slot
-	Bucketsize uint16 // size of bucket
-	Flags      uint32
+	abi.MapType
 }
 
 // ptrType represents a pointer type.
 type ptrType struct {
-	rtype
-	Elem *rtype // pointer element (pointed at) type
+	abi.PtrType
 }
 
 // sliceType represents a slice type.
 type sliceType struct {
-	rtype
-	Elem *rtype // slice element type
+	abi.SliceType
}
 
 // Struct field
-type structField struct {
-	Name   abi.Name // name is always non-empty
-	Typ    *rtype   // type of field
-	Offset uintptr  // byte offset of field
-}
-
-func (f *structField) embedded() bool {
-	return f.Name.IsEmbedded()
-}
+type structField = abi.StructField
 
 // structType represents a struct type.
 type structType struct {
-	rtype
-	PkgPath abi.Name
-	Fields  []structField // sorted by offset
+	abi.StructType
 }
 
 func pkgPath(n abi.Name) string {
@@ -483,37 +497,37 @@ func addReflectOff(ptr unsafe.Pointer) int32
 
 // resolveReflectName adds a name to the reflection lookup map in the runtime.
 // It returns a new nameOff that can be used to refer to the pointer.
-func resolveReflectName(n abi.Name) nameOff {
-	return nameOff(addReflectOff(unsafe.Pointer(n.Bytes)))
+func resolveReflectName(n abi.Name) aNameOff {
+	return aNameOff(addReflectOff(unsafe.Pointer(n.Bytes)))
 }
 
 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
 // It returns a new typeOff that can be used to refer to the pointer.
-func resolveReflectType(t *rtype) typeOff {
-	return typeOff(addReflectOff(unsafe.Pointer(t)))
+func resolveReflectType(t *abi.Type) aTypeOff {
+	return aTypeOff(addReflectOff(unsafe.Pointer(t)))
 }
 
 // resolveReflectText adds a function pointer to the reflection lookup map in
 // the runtime. It returns a new textOff that can be used to refer to the
 // pointer.
-func resolveReflectText(ptr unsafe.Pointer) textOff {
-	return textOff(addReflectOff(ptr))
+func resolveReflectText(ptr unsafe.Pointer) aTextOff {
+	return aTextOff(addReflectOff(ptr))
 }
 
-func (t *rtype) nameOff(off nameOff) abi.Name {
+func (t *rtype) nameOff(off aNameOff) abi.Name {
 	return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
 }
 
-func (t *rtype) typeOff(off typeOff) *rtype {
-	return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+func (t *rtype) typeOff(off aTypeOff) *abi.Type {
+	return (*abi.Type)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
 }
 
-func (t *rtype) textOff(off textOff) unsafe.Pointer {
+func (t *rtype) textOff(off aTextOff) unsafe.Pointer {
 	return resolveTextOff(unsafe.Pointer(t), int32(off))
 }
 
-func (t *rtype) uncommon() *uncommonType {
-	return t.t.Uncommon()
+func textOffFor(t *abi.Type, off aTextOff) unsafe.Pointer {
+	return toRType(t).textOff(off)
 }
 
 func (t *rtype) String() string {
@@ -543,10 +557,6 @@ func (t *rtype) FieldAlign() int { return t.t.FieldAlign() }
 
 func (t *rtype) Kind() Kind { return Kind(t.t.Kind()) }
 
-func (t *rtype) pointers() bool { return t.t.PtrBytes != 0 }
-
-func (t *rtype) common() *rtype { return t }
-
 func (t *rtype) exportedMethods() []abi.Method {
 	ut := t.uncommon()
 	if ut == nil {
@@ -591,7 +601,7 @@ func (t *rtype) Method(i int) (m Method) {
 	m.Type = mt
 	tfn := t.textOff(p.Tfn)
 	fn := unsafe.Pointer(&tfn)
-	m.Func = Value{mt.(*rtype), fn, fl}
+	m.Func = Value{&mt.(*rtype).t, fn, fl}
 
 	m.Index = i
 	return m
 
@@ -640,12 +650,12 @@ func (t *rtype) PkgPath() string {
 	return t.nameOff(ut.PkgPath).Name()
 }
 
-func (t *rtype) hasName() bool {
-	return t.t.TFlag&abi.TFlagNamed != 0
+func pkgPathFor(t *abi.Type) string {
+	return toRType(t).PkgPath()
 }
 
 func (t *rtype) Name() string {
-	if !t.hasName() {
+	if !t.t.HasName() {
 		return ""
 	}
 	s := t.String()
@@ -663,6 +673,10 @@ func (t *rtype) Name() string {
 	return s[i+1:]
 }
 
+func nameFor(t *abi.Type) string {
+	return toRType(t).Name()
+}
+
 func (t *rtype) ChanDir() ChanDir {
 	if t.Kind() != Chan {
 		panic("reflect: ChanDir of non-chan type " + t.String())
@@ -675,25 +689,16 @@ func toRType(t *abi.Type) *rtype {
 	return (*rtype)(unsafe.Pointer(t))
 }
 
-func (t *rtype) Elem() Type {
-	switch t.Kind() {
-	case Array:
-		tt := (*arrayType)(unsafe.Pointer(t))
-		return toType(toRType(tt.Elem))
-	case Chan:
-		tt := (*chanType)(unsafe.Pointer(t))
-		return toType(toRType(tt.Elem))
-	case Map:
-		tt := (*mapType)(unsafe.Pointer(t))
-		return toType(tt.Elem)
-	case Pointer:
-		tt := (*ptrType)(unsafe.Pointer(t))
-		return toType(tt.Elem)
-	case Slice:
-		tt := (*sliceType)(unsafe.Pointer(t))
-		return toType(tt.Elem)
+func elem(t *abi.Type) *abi.Type {
+	et := t.Elem()
+	if et != nil {
+		return et
 	}
-	panic("reflect: Elem of invalid type " + t.String())
+	panic("reflect: Elem of invalid type " + stringFor(t))
+}
+
+func (t *rtype) Elem() Type {
+	return toType(elem(t.common()))
 }
 
 func (t *rtype) Field(i int) StructField {
@@ -757,7 +762,7 @@ func (t *rtype) In(i int) Type {
 		panic("reflect: In of non-func type " + t.String())
 	}
 	tt := (*abi.FuncType)(unsafe.Pointer(t))
-	return toType(toRType(tt.InSlice()[i]))
+	return toType(tt.InSlice()[i])
 }
 
 func (t *rtype) NumIn() int {
@@ -781,7 +786,7 @@ func (t *rtype) Out(i int) Type {
 		panic("reflect: Out of non-func type " + t.String())
 	}
 	tt := (*abi.FuncType)(unsafe.Pointer(t))
-	return toType(toRType(tt.OutSlice()[i]))
+	return toType(tt.OutSlice()[i])
 }
 
 func (t *rtype) IsVariadic() bool {
@@ -962,7 +967,7 @@ func (t *structType) Field(i int) (f StructField) {
 	p := &t.Fields[i]
 	f.Type = toType(p.Typ)
 	f.Name = p.Name.Name()
-	f.Anonymous = p.embedded()
+	f.Anonymous = p.Embedded()
 	if !p.Name.IsExported() {
 		f.PkgPath = t.PkgPath.Name()
 	}
@@ -987,7 +992,7 @@ func (t *structType) Field(i int) (f StructField) {
 
 // FieldByIndex returns the nested field corresponding to index.
 func (t *structType) FieldByIndex(index []int) (f StructField) {
-	f.Type = toType(&t.rtype)
+	f.Type = toType(&t.Type)
 	for i, x := range index {
 		if i > 0 {
 			ft := f.Type
@@ -1058,12 +1063,12 @@ func (t *structType) FieldByNameFunc(match func(string) bool) (result StructFiel
 			f := &t.Fields[i]
 			// Find name and (for embedded field) type for field f.
 			fname := f.Name.Name()
-			var ntyp *rtype
-			if f.embedded() {
+			var ntyp *abi.Type
+			if f.Embedded() {
 				// Embedded field of type T or *T.
 				ntyp = f.Typ
-				if ntyp.Kind() == Pointer {
-					ntyp = ntyp.Elem().common()
+				if ntyp.Kind() == abi.Pointer {
+					ntyp = ntyp.Elem()
 				}
 			}
 
@@ -1085,7 +1090,7 @@ func (t *structType) FieldByNameFunc(match func(string) bool) (result StructFiel
 			// Queue embedded struct fields for processing with next level,
 			// but only if we haven't seen a match yet at this level and only
 			// if the embedded types haven't already been queued.
-			if ok || ntyp == nil || ntyp.Kind() != Struct {
+			if ok || ntyp == nil || ntyp.Kind() != abi.Struct {
 				continue
 			}
 			styp := (*structType)(unsafe.Pointer(ntyp))
@@ -1124,7 +1129,7 @@ func (t *structType) FieldByName(name string) (f StructField, present bool) {
 		if tf.Name.Name() == name {
 			return t.Field(i), true
 		}
-		if tf.embedded() {
+		if tf.Embedded() {
 			hasEmbeds = true
 		}
 	}
@@ -1143,7 +1148,7 @@ func TypeOf(i any) Type {
 }
 
 // rtypeOf directly extracts the *rtype of the provided value.
-func rtypeOf(i any) *rtype {
+func rtypeOf(i any) *abi.Type {
 	eface := *(*emptyInterface)(unsafe.Pointer(&i))
 	return eface.typ
 }
@@ -1161,28 +1166,29 @@ func PtrTo(t Type) Type { return PointerTo(t) }
 
 // PointerTo returns the pointer type with element t.
 // For example, if t represents type Foo, PointerTo(t) represents *Foo.
 func PointerTo(t Type) Type {
-	return t.(*rtype).ptrTo()
+	return toRType(t.(*rtype).ptrTo())
 }
 
-func (t *rtype) ptrTo() *rtype {
-	if t.t.PtrToThis != 0 {
-		return t.typeOff(t.t.PtrToThis)
+func (t *rtype) ptrTo() *abi.Type {
+	at := &t.t
+	if at.PtrToThis != 0 {
+		return t.typeOff(at.PtrToThis)
 	}
 
 	// Check the cache.
 	if pi, ok := ptrMap.Load(t); ok {
-		return &pi.(*ptrType).rtype
+		return &pi.(*ptrType).Type
 	}
 
 	// Look in known types.
 	s := "*" + t.String()
 	for _, tt := range typesByString(s) {
 		p := (*ptrType)(unsafe.Pointer(tt))
-		if p.Elem != t {
+		if p.Elem != &t.t {
 			continue
 		}
 		pi, _ := ptrMap.LoadOrStore(t, p)
-		return &pi.(*ptrType).rtype
+		return &pi.(*ptrType).Type
 	}
 
 	// Create a new ptrType starting with the description
@@ -1191,20 +1197,24 @@ func (t *rtype) ptrTo() *rtype {
 	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
 	pp := *prototype
 
-	pp.t.Str = resolveReflectName(newName(s, "", false, false))
-	pp.t.PtrToThis = 0
+	pp.Str = resolveReflectName(newName(s, "", false, false))
+	pp.PtrToThis = 0
 
 	// For the type structures linked into the binary, the
 	// compiler provides a good hash of the string.
 	// Create a good hash for the new string by using
 	// the FNV-1 hash's mixing function to combine the
 	// old hash and the new "*".
-	pp.t.Hash = fnv1(t.t.Hash, '*')
+	pp.Hash = fnv1(t.t.Hash, '*')
 
-	pp.Elem = t
+	pp.Elem = at
 
 	pi, _ := ptrMap.LoadOrStore(t, &pp)
-	return &pi.(*ptrType).rtype
+	return &pi.(*ptrType).Type
+}
+
+func ptrTo(t *abi.Type) *abi.Type {
+	return toRType(t).ptrTo()
 }
 
 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
@@ -1222,23 +1232,22 @@ func (t *rtype) Implements(u Type) bool {
 	if u.Kind() != Interface {
 		panic("reflect: non-interface type passed to Type.Implements")
 	}
-	return implements(u.(*rtype), t)
+	return implements(u.common(), t.common())
 }
 
 func (t *rtype) AssignableTo(u Type) bool {
 	if u == nil {
 		panic("reflect: nil type passed to Type.AssignableTo")
 	}
-	uu := u.(*rtype)
-	return directlyAssignable(uu, t) || implements(uu, t)
+	uu := u.common()
+	return directlyAssignable(uu, t.common()) || implements(uu, t.common())
 }
 
 func (t *rtype) ConvertibleTo(u Type) bool {
 	if u == nil {
 		panic("reflect: nil type passed to Type.ConvertibleTo")
 	}
-	uu := u.(*rtype)
-	return convertOp(uu, t) != nil
+	return convertOp(u.common(), t.common()) != nil
 }
 
 func (t *rtype) Comparable() bool {
@@ -1246,8 +1255,8 @@ func (t *rtype) Comparable() bool {
 }
 
 // implements reports whether the type V implements the interface type T.
-func implements(T, V *rtype) bool {
-	if T.Kind() != Interface {
+func implements(T, V *abi.Type) bool {
+	if T.Kind() != abi.Interface {
 		return false
 	}
 	t := (*interfaceType)(unsafe.Pointer(T))
@@ -1267,15 +1276,15 @@ func implements(T, V *rtype) bool {
 	// This lets us run the scan in overall linear time instead of
 	// the quadratic time a naive search would require.
 	// See also ../runtime/iface.go.
-	if V.Kind() == Interface {
+	if V.Kind() == abi.Interface {
 		v := (*interfaceType)(unsafe.Pointer(V))
 		i := 0
 		for j := 0; j < len(v.Methods); j++ {
 			tm := &t.Methods[i]
 			tmName := t.nameOff(tm.Name)
 			vm := &v.Methods[j]
-			vmName := V.nameOff(vm.Name)
-			if vmName.Name() == tmName.Name() && V.typeOff(vm.Typ) == t.typeOff(tm.Typ) {
+			vmName := nameOffFor(V, vm.Name)
+			if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Typ) == t.typeOff(tm.Typ) {
 				if !tmName.IsExported() {
 					tmPkgPath := pkgPath(tmName)
 					if tmPkgPath == "" {
@@ -1297,7 +1306,7 @@ func implements(T, V *rtype) bool {
 		return false
 	}
 
-	v := V.uncommon()
+	v := V.Uncommon()
 	if v == nil {
 		return false
 	}
@@ -1307,8 +1316,8 @@ func implements(T, V *rtype) bool {
 		tm := &t.Methods[i]
 		tmName := t.nameOff(tm.Name)
 		vm := vmethods[j]
-		vmName := V.nameOff(vm.Name)
-		if vmName.Name() == tmName.Name() && V.typeOff(vm.Mtyp) == t.typeOff(tm.Typ) {
+		vmName := nameOffFor(V, vm.Name)
+		if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Mtyp) == t.typeOff(tm.Typ) {
 			if !tmName.IsExported() {
 				tmPkgPath := pkgPath(tmName)
 				if tmPkgPath == "" {
@@ -1316,7 +1325,7 @@ func implements(T, V *rtype) bool {
 				}
 				vmPkgPath := pkgPath(vmName)
 				if vmPkgPath == "" {
-					vmPkgPath = V.nameOff(v.PkgPath).Name()
+					vmPkgPath = nameOffFor(V, v.PkgPath).Name()
 				}
 				if tmPkgPath != vmPkgPath {
 					continue
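The loops in implements above rely on both method lists being sorted by name, so a single pass with two cursors replaces a quadratic nested search, as the surrounding comment notes. The same idea in a standalone, deliberately simplified form (plain strings instead of method descriptors):

package main

import "fmt"

// satisfies reports whether every name in iface appears in concrete.
// Both slices must be sorted. i advances only on a match, j always
// advances, giving O(len(iface)+len(concrete)) comparisons.
func satisfies(iface, concrete []string) bool {
	i := 0
	for j := 0; j < len(concrete) && i < len(iface); j++ {
		if concrete[j] == iface[i] {
			i++ // requirement i met; move to the next one
		}
	}
	return i == len(iface)
}

func main() {
	fmt.Println(satisfies([]string{"Read"}, []string{"Close", "Read"})) // true
	fmt.Println(satisfies([]string{"Seek"}, []string{"Close", "Read"})) // false
}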
@@ -1334,12 +1343,12 @@ func implements(T, V *rtype) bool {
 // can be directly assigned (using memmove) to another channel type T.
 // https://golang.org/doc/go_spec.html#Assignability
 // T and V must be both of Chan kind.
-func specialChannelAssignability(T, V *rtype) bool {
+func specialChannelAssignability(T, V *abi.Type) bool {
 	// Special case:
 	// x is a bidirectional channel value, T is a channel type,
 	// x's type V and T have identical element types,
 	// and at least one of V or T is not a defined type.
-	return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
+	return V.ChanDir() == abi.BothDir && (nameFor(T) == "" || nameFor(V) == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
 }
 
 // directlyAssignable reports whether a value x of type V can be directly
@@ -1347,7 +1356,7 @@ func specialChannelAssignability(T, V *rtype) bool {
 // https://golang.org/doc/go_spec.html#Assignability
 // Ignoring the interface rules (implemented elsewhere)
 // and the ideal constant rules (no ideal constants at run time).
-func directlyAssignable(T, V *rtype) bool {
+func directlyAssignable(T, V *abi.Type) bool {
 	// x's type V is identical to T?
 	if T == V {
 		return true
 	}
 
 	// Otherwise at least one of T and V must not be defined
 	// and they must have the same kind.
-	if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
+	if T.HasName() && V.HasName() || T.Kind() != V.Kind() {
 		return false
 	}
 
-	if T.Kind() == Chan && specialChannelAssignability(T, V) {
+	if T.Kind() == abi.Chan && specialChannelAssignability(T, V) {
 		return true
 	}
 
@@ -1367,25 +1376,25 @@ func directlyAssignable(T, V *rtype) bool {
 	return haveIdenticalUnderlyingType(T, V, true)
 }
 
-func haveIdenticalType(T, V Type, cmpTags bool) bool {
+func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool {
 	if cmpTags {
 		return T == V
 	}
 
-	if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() {
+	if nameFor(T) != nameFor(V) || T.Kind() != V.Kind() || pkgPathFor(T) != pkgPathFor(V) {
 		return false
 	}
 
-	return haveIdenticalUnderlyingType(T.common(), V.common(), false)
+	return haveIdenticalUnderlyingType(T, V, false)
 }
 
-func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
+func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool {
 	if T == V {
 		return true
 	}
 
-	kind := T.Kind()
-	if kind != V.Kind() {
+	kind := Kind(T.Kind())
+	if kind != Kind(V.Kind()) {
 		return false
 	}
 
@@ -1410,12 +1419,12 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
 			return false
 		}
 		for i := 0; i < t.NumIn(); i++ {
-			if !haveIdenticalType(toRType(t.In(i)), toRType(v.In(i)), cmpTags) {
+			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
 				return false
 			}
 		}
 		for i := 0; i < t.NumOut(); i++ {
-			if !haveIdenticalType(toRType(t.Out(i)), toRType(v.Out(i)), cmpTags) {
+			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
 				return false
 			}
 		}
@@ -1461,7 +1470,7 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
 			if tf.Offset != vf.Offset {
 				return false
 			}
-			if tf.embedded() != vf.embedded() {
+			if tf.Embedded() != vf.Embedded() {
 				return false
 			}
 		}
@@ -1492,17 +1501,17 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
 // pointers, channels, maps, slices, and arrays.
 func typelinks() (sections []unsafe.Pointer, offset [][]int32)
 
-func rtypeOff(section unsafe.Pointer, off int32) *rtype {
-	return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
+func rtypeOff(section unsafe.Pointer, off int32) *abi.Type {
+	return (*abi.Type)(add(section, uintptr(off), "sizeof(rtype) > 0"))
 }
 
 // typesByString returns the subslice of typelinks() whose elements have
 // the given string representation.
 // It may be empty (no known types with that string) or may have
 // multiple elements (multiple types with that string).
-func typesByString(s string) []*rtype {
+func typesByString(s string) []*abi.Type {
 	sections, offset := typelinks()
-	var ret []*rtype
+	var ret []*abi.Type
 
 	for offsI, offs := range offset {
 		section := sections[offsI]
@@ -1513,7 +1522,7 @@
 		for i < j {
 			h := i + (j-i)>>1 // avoid overflow when computing h
 			// i ≤ h < j
-			if !(rtypeOff(section, offs[h]).String() >= s) {
+			if !(stringFor(rtypeOff(section, offs[h])) >= s) {
 				i = h + 1 // preserves f(i-1) == false
 			} else {
 				j = h // preserves f(j) == true
 			}
 		}
@@ -1526,7 +1535,7 @@
 		// to do a linear scan anyway.
 		for j := i; j < len(offs); j++ {
 			typ := rtypeOff(section, offs[j])
-			if typ.String() != s {
+			if stringFor(typ) != s {
 				break
 			}
 			ret = append(ret, typ)
@@ -1543,8 +1552,8 @@ var lookupCache sync.Map // map[cacheKey]*rtype
 // type kind, one or two subtypes, and an extra integer.
 type cacheKey struct {
 	kind  Kind
-	t1    *rtype
-	t2    *rtype
+	t1    *abi.Type
+	t2    *abi.Type
 	extra uintptr
 }
 
@@ -1565,7 +1574,7 @@ var funcLookupCache struct {
 // The gc runtime imposes a limit of 64 kB on channel element types.
 // If t's size is equal to or exceeds this limit, ChanOf panics.
 func ChanOf(dir ChanDir, t Type) Type {
-	typ := t.(*rtype)
+	typ := t.common()
 
 	// Look in cache.
 	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
@@ -1574,7 +1583,7 @@
 	}
 
 	// This restriction is imposed by the gc compiler and the runtime.
-	if typ.t.Size_ >= 1<<16 {
+	if typ.Size_ >= 1<<16 {
 		panic("reflect.ChanOf: element size too large")
 	}
 
@@ -1584,11 +1593,11 @@
 	default:
 		panic("reflect.ChanOf: invalid dir")
 	case SendDir:
-		s = "chan<- " + typ.String()
+		s = "chan<- " + stringFor(typ)
 	case RecvDir:
-		s = "<-chan " + typ.String()
+		s = "<-chan " + stringFor(typ)
 	case BothDir:
-		typeStr := typ.String()
+		typeStr := stringFor(typ)
 		if typeStr[0] == '<' {
 			// typ is recv chan, need parentheses as "<-" associates with leftmost
 			// chan possible, see:
@@ -1601,8 +1610,8 @@
 	}
 
 	for _, tt := range typesByString(s) {
 		ch := (*chanType)(unsafe.Pointer(tt))
-		if ch.Elem == &typ.t && ch.Dir == abi.ChanDir(dir) {
-			ti, _ := lookupCache.LoadOrStore(ckey, tt)
+		if ch.Elem == typ && ch.Dir == abi.ChanDir(dir) {
+			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
 			return ti.(Type)
 		}
 	}
@@ -1614,8 +1623,8 @@
 	ch.TFlag = abi.TFlagRegularMemory
 	ch.Dir = abi.ChanDir(dir)
 	ch.Str = resolveReflectName(newName(s, "", false, false))
-	ch.Hash = fnv1(typ.t.Hash, 'c', byte(dir))
-	ch.Elem = &typ.t
+	ch.Hash = fnv1(typ.Hash, 'c', byte(dir))
+	ch.Elem = typ
 
 	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&ch.Type))
 	return ti.(Type)
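ChanOf above, and MapOf, SliceOf, and ArrayOf further down, all share one sync.Map keyed by a composite cacheKey; after this patch the t1/t2 fields hold *abi.Type. A reduced sketch of the lookup-or-construct flow, with strings standing in for type descriptors (illustrative only):

package main

import (
	"fmt"
	"sync"
)

type cacheKey struct {
	kind  string  // Chan, Map, Slice, Array
	t1    string  // stand-in for *abi.Type
	extra uintptr // ChanDir, array length, ...
}

var lookupCache sync.Map // map[cacheKey]string

func chanOf(dir uintptr, elem string) string {
	ckey := cacheKey{"Chan", elem, dir}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(string)
	}
	ch := "chan " + elem // stand-in for building the new descriptor
	// LoadOrStore keeps the winner if two goroutines race to insert.
	ti, _ := lookupCache.LoadOrStore(ckey, ch)
	return ti.(string)
}

func main() {
	fmt.Println(chanOf(3, "int")) // "chan int", cached for next time
}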
@@ -1628,11 +1637,11 @@ func ChanOf(dir ChanDir, t Type) Type {
 // If the key type is not a valid map key type (that is, if it does
 // not implement Go's == operator), MapOf panics.
 func MapOf(key, elem Type) Type {
-	ktyp := key.(*rtype)
-	etyp := elem.(*rtype)
+	ktyp := key.common()
+	etyp := elem.common()
 
-	if ktyp.t.Equal == nil {
-		panic("reflect.MapOf: invalid key type " + ktyp.String())
+	if ktyp.Equal == nil {
+		panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
 	}
 
 	// Look in cache.
@@ -1642,11 +1651,11 @@
 	}
 
 	// Look in known types.
-	s := "map[" + ktyp.String() + "]" + etyp.String()
+	s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
 	for _, tt := range typesByString(s) {
 		mt := (*mapType)(unsafe.Pointer(tt))
 		if mt.Key == ktyp && mt.Elem == etyp {
-			ti, _ := lookupCache.LoadOrStore(ckey, tt)
+			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
 			return ti.(Type)
 		}
 	}
@@ -1656,9 +1665,9 @@ func MapOf(key, elem Type) Type {
 	// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
 	var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
 	mt := **(**mapType)(unsafe.Pointer(&imap))
-	mt.t.Str = resolveReflectName(newName(s, "", false, false))
-	mt.t.TFlag = 0
-	mt.t.Hash = fnv1(etyp.t.Hash, 'm', byte(ktyp.t.Hash>>24), byte(ktyp.t.Hash>>16), byte(ktyp.t.Hash>>8), byte(ktyp.t.Hash))
+	mt.Str = resolveReflectName(newName(s, "", false, false))
+	mt.TFlag = 0
+	mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
 	mt.Key = ktyp
 	mt.Elem = etyp
 	mt.Bucket = bucketOf(ktyp, etyp)
 	mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
 		return typehash(ktyp, p, seed)
 	}
 	mt.Flags = 0
-	if ktyp.t.Size_ > maxKeySize {
-		mt.Keysize = uint8(goarch.PtrSize)
+	if ktyp.Size_ > maxKeySize {
+		mt.KeySize = uint8(goarch.PtrSize)
 		mt.Flags |= 1 // indirect key
 	} else {
-		mt.Keysize = uint8(ktyp.t.Size_)
+		mt.KeySize = uint8(ktyp.Size_)
 	}
-	if etyp.t.Size_ > maxValSize {
-		mt.Valuesize = uint8(goarch.PtrSize)
+	if etyp.Size_ > maxValSize {
+		mt.ValueSize = uint8(goarch.PtrSize)
 		mt.Flags |= 2 // indirect value
 	} else {
-		mt.Valuesize = uint8(etyp.t.Size_)
+		mt.MapType.ValueSize = uint8(etyp.Size_)
 	}
-	mt.Bucketsize = uint16(mt.Bucket.t.Size_)
+	mt.MapType.BucketSize = uint16(mt.Bucket.Size_)
 	if isReflexive(ktyp) {
 		mt.Flags |= 4
 	}
@@ -1688,9 +1697,9 @@
 	if hashMightPanic(ktyp) {
 		mt.Flags |= 16
 	}
-	mt.t.PtrToThis = 0
+	mt.PtrToThis = 0
 
-	ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
 	return ti.(Type)
 }
 
@@ -1775,9 +1784,9 @@
 	// Look in cache.
 	if ts, ok := funcLookupCache.m.Load(hash); ok {
-		for _, t := range ts.([]*rtype) {
-			if haveIdenticalUnderlyingType(toRType(&ft.Type), t, true) {
-				return t
+		for _, t := range ts.([]*abi.Type) {
+			if haveIdenticalUnderlyingType(&ft.Type, t, true) {
+				return toRType(t)
 			}
 		}
 	}
@@ -1786,26 +1795,26 @@
 	funcLookupCache.Lock()
 	defer funcLookupCache.Unlock()
 	if ts, ok := funcLookupCache.m.Load(hash); ok {
-		for _, t := range ts.([]*rtype) {
-			if haveIdenticalUnderlyingType(toRType(&ft.Type), t, true) {
-				return t
+		for _, t := range ts.([]*abi.Type) {
+			if haveIdenticalUnderlyingType(&ft.Type, t, true) {
+				return toRType(t)
 			}
 		}
 	}
 
-	addToCache := func(tt *rtype) Type {
-		var rts []*rtype
+	addToCache := func(tt *abi.Type) Type {
+		var rts []*abi.Type
 		if rti, ok := funcLookupCache.m.Load(hash); ok {
-			rts = rti.([]*rtype)
+			rts = rti.([]*abi.Type)
 		}
 		funcLookupCache.m.Store(hash, append(rts, tt))
-		return tt
+		return toType(tt)
 	}
 
 	// Look in known types for the same string representation.
 	str := funcStr(ft)
 	for _, tt := range typesByString(str) {
-		if haveIdenticalUnderlyingType(toRType(&ft.Type), tt, true) {
+		if haveIdenticalUnderlyingType(&ft.Type, tt, true) {
 			return addToCache(tt)
 		}
 	}
@@ -1813,7 +1822,7 @@
 	// Populate the remaining fields of ft and store in cache.
 	ft.Str = resolveReflectName(newName(str, "", false, false))
 	ft.PtrToThis = 0
-	return addToCache(toRType(&ft.Type))
+	return addToCache(&ft.Type)
 }
 
 func stringFor(t *abi.Type) string {
 	return toRType(t).String()
 }
 
@@ -1829,7 +1838,7 @@ func funcStr(ft *funcType) string {
 		}
 		if ft.IsVariadic() && i == int(ft.InCount)-1 {
 			repr = append(repr, "..."...)
-			repr = append(repr, (*sliceType)(unsafe.Pointer(t)).Elem.String()...)
+			repr = append(repr, stringFor((*sliceType)(unsafe.Pointer(t)).Elem)...)
 		} else {
 			repr = append(repr, stringFor(t)...)
 		}
@@ -1855,15 +1864,15 @@
 // isReflexive reports whether the == operation on the type is reflexive.
 // That is, x == x for all values x of type t.
-func isReflexive(t *rtype) bool {
-	switch t.Kind() {
+func isReflexive(t *abi.Type) bool {
+	switch Kind(t.Kind()) {
 	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
 		return true
 	case Float32, Float64, Complex64, Complex128, Interface:
 		return false
 	case Array:
 		tt := (*arrayType)(unsafe.Pointer(t))
-		return isReflexive(toRType(tt.Elem))
+		return isReflexive(tt.Elem)
 	case Struct:
 		tt := (*structType)(unsafe.Pointer(t))
 		for _, f := range tt.Fields {
@@ -1874,13 +1883,13 @@
 		return true
 	default:
 		// Func, Map, Slice, Invalid
-		panic("isReflexive called on non-key type " + t.String())
+		panic("isReflexive called on non-key type " + stringFor(t))
 	}
 }
 
 // needKeyUpdate reports whether map overwrites require the key to be copied.
-func needKeyUpdate(t *rtype) bool {
-	switch t.Kind() {
+func needKeyUpdate(t *abi.Type) bool {
+	switch Kind(t.Kind()) {
 	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
 		return false
 	case Float32, Float64, Complex64, Complex128, Interface, String:
@@ -1890,7 +1899,7 @@
 		return true
 	case Array:
 		tt := (*arrayType)(unsafe.Pointer(t))
-		return needKeyUpdate(toRType(tt.Elem))
+		return needKeyUpdate(tt.Elem)
 	case Struct:
 		tt := (*structType)(unsafe.Pointer(t))
 		for _, f := range tt.Fields {
@@ -1901,18 +1910,18 @@
 		return false
 	default:
 		// Func, Map, Slice, Invalid
-		panic("needKeyUpdate called on non-key type " + t.String())
+		panic("needKeyUpdate called on non-key type " + stringFor(t))
 	}
 }
 
 // hashMightPanic reports whether the hash of a map key of type t might panic.
-func hashMightPanic(t *rtype) bool {
-	switch t.Kind() {
+func hashMightPanic(t *abi.Type) bool {
+	switch Kind(t.Kind()) {
 	case Interface:
 		return true
 	case Array:
 		tt := (*arrayType)(unsafe.Pointer(t))
-		return hashMightPanic(toRType(tt.Elem))
+		return hashMightPanic(tt.Elem)
 	case Struct:
 		tt := (*structType)(unsafe.Pointer(t))
 		for _, f := range tt.Fields {
@@ -1936,12 +1945,12 @@ const (
 	maxValSize uintptr = abi.MapMaxElemBytes
 )
 
-func bucketOf(ktyp, etyp *rtype) *rtype {
-	if ktyp.t.Size_ > maxKeySize {
-		ktyp = PointerTo(ktyp).(*rtype)
+func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
+	if ktyp.Size_ > maxKeySize {
+		ktyp = ptrTo(ktyp)
 	}
-	if etyp.t.Size_ > maxValSize {
-		etyp = PointerTo(etyp).(*rtype)
+	if etyp.Size_ > maxValSize {
+		etyp = ptrTo(etyp)
 	}
 
 	// Prepare GC data if any.
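Before the size computation in the next hunk, it may help to spell out the bucket layout it encodes: 8 tophash bytes, then 8 keys, then 8 elems, then one trailing overflow pointer. Illustrative arithmetic only, with assumed 64-bit constants:

package main

import "fmt"

const (
	bucketSize = 8 // slots per bucket (abi.MapBucketCount)
	ptrSize    = 8 // goarch.PtrSize on 64-bit, assumed here
)

// bucketBytes mirrors bucketSize*(1+ktyp.Size_+etyp.Size_)+goarch.PtrSize:
// one tophash byte per slot, the keys, the elems, and one overflow-bucket
// pointer at the end.
func bucketBytes(keySize, elemSize uintptr) uintptr {
	return bucketSize*(1+keySize+elemSize) + ptrSize
}

func main() {
	fmt.Println(bucketBytes(8, 16)) // 8*(1+8+16) + 8 = 208
}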
@@ -1952,13 +1961,13 @@
 	var gcdata *byte
 	var ptrdata uintptr
 
-	size := bucketSize*(1+ktyp.t.Size_+etyp.t.Size_) + goarch.PtrSize
-	if size&uintptr(ktyp.t.Align_-1) != 0 || size&uintptr(etyp.t.Align_-1) != 0 {
+	size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
+	if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
 		panic("reflect: bad size computation in MapOf")
 	}
 
-	if ktyp.t.PtrBytes != 0 || etyp.t.PtrBytes != 0 {
-		nptr := (bucketSize*(1+ktyp.t.Size_+etyp.t.Size_) + goarch.PtrSize) / goarch.PtrSize
+	if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 {
+		nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
 		n := (nptr + 7) / 8
 
 		// Runtime needs pointer masks to be a multiple of uintptr in size.
@@ -1966,15 +1975,15 @@
 		mask := make([]byte, n)
 		base := bucketSize / goarch.PtrSize
 
-		if ktyp.t.PtrBytes != 0 {
+		if ktyp.PtrBytes != 0 {
 			emitGCMask(mask, base, ktyp, bucketSize)
 		}
-		base += bucketSize * ktyp.t.Size_ / goarch.PtrSize
+		base += bucketSize * ktyp.Size_ / goarch.PtrSize
 
-		if etyp.t.PtrBytes != 0 {
+		if etyp.PtrBytes != 0 {
 			emitGCMask(mask, base, etyp, bucketSize)
 		}
-		base += bucketSize * etyp.t.Size_ / goarch.PtrSize
+		base += bucketSize * etyp.Size_ / goarch.PtrSize
 
 		word := base
 		mask[word/8] |= 1 << (word % 8)
@@ -1987,15 +1996,15 @@
 		}
 	}
 
-	b := &rtype{abi.Type{
+	b := &abi.Type{
 		Align_:   goarch.PtrSize,
 		Size_:    size,
 		Kind_:    uint8(Struct),
 		PtrBytes: ptrdata,
 		GCData:   gcdata,
-	}}
-	s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
-	b.t.Str = resolveReflectName(newName(s, "", false, false))
+	}
+	s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
+	b.Str = resolveReflectName(newName(s, "", false, false))
 	return b
 }
 
@@ -2005,13 +2014,13 @@ func (t *rtype) gcSlice(begin, end uintptr) []byte {
 
 // emitGCMask writes the GC mask for [n]typ into out, starting at bit
 // offset base.
-func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
-	if typ.t.Kind_&kindGCProg != 0 {
+func emitGCMask(out []byte, base uintptr, typ *abi.Type, n uintptr) {
+	if typ.Kind_&kindGCProg != 0 {
 		panic("reflect: unexpected GC program")
 	}
-	ptrs := typ.t.PtrBytes / goarch.PtrSize
-	words := typ.t.Size_ / goarch.PtrSize
-	mask := typ.gcSlice(0, (ptrs+7)/8)
+	ptrs := typ.PtrBytes / goarch.PtrSize
+	words := typ.Size_ / goarch.PtrSize
+	mask := typ.GcSlice(0, (ptrs+7)/8)
 	for j := uintptr(0); j < ptrs; j++ {
 		if (mask[j/8]>>(j%8))&1 != 0 {
 			for i := uintptr(0); i < n; i++ {
@@ -2024,17 +2033,17 @@
 
 // appendGCProg appends the GC program for the first ptrdata bytes of
 // typ to dst and returns the extended slice.
-func appendGCProg(dst []byte, typ *rtype) []byte {
-	if typ.t.Kind_&kindGCProg != 0 {
+func appendGCProg(dst []byte, typ *abi.Type) []byte {
+	if typ.Kind_&kindGCProg != 0 {
 		// Element has GC program; emit one element.
-		n := uintptr(*(*uint32)(unsafe.Pointer(typ.t.GCData)))
-		prog := typ.gcSlice(4, 4+n-1)
+		n := uintptr(*(*uint32)(unsafe.Pointer(typ.GCData)))
+		prog := typ.GcSlice(4, 4+n-1)
 		return append(dst, prog...)
 	}
 
 	// Element is small with pointer mask; use as literal bits.
-	ptrs := typ.t.PtrBytes / goarch.PtrSize
-	mask := typ.gcSlice(0, (ptrs+7)/8)
+	ptrs := typ.PtrBytes / goarch.PtrSize
+	mask := typ.GcSlice(0, (ptrs+7)/8)
 
 	// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
 	for ; ptrs > 120; ptrs -= 120 {
@@ -2051,7 +2060,7 @@ func appendGCProg(dst []byte, typ *rtype) []byte {
 // SliceOf returns the slice type with element type t.
 // For example, if t represents int, SliceOf(t) represents []int.
 func SliceOf(t Type) Type {
-	typ := t.(*rtype)
+	typ := t.common()
 
 	// Look in cache.
 	ckey := cacheKey{Slice, typ, nil, 0}
@@ -2060,11 +2069,11 @@
 	}
 
 	// Look in known types.
-	s := "[]" + typ.String()
+	s := "[]" + stringFor(typ)
 	for _, tt := range typesByString(s) {
 		slice := (*sliceType)(unsafe.Pointer(tt))
 		if slice.Elem == typ {
-			ti, _ := lookupCache.LoadOrStore(ckey, tt)
+			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
 			return ti.(Type)
 		}
 	}
@@ -2073,13 +2082,13 @@
 	var islice any = ([]unsafe.Pointer)(nil)
 	prototype := *(**sliceType)(unsafe.Pointer(&islice))
 	slice := *prototype
-	slice.t.TFlag = 0
-	slice.t.Str = resolveReflectName(newName(s, "", false, false))
-	slice.t.Hash = fnv1(typ.t.Hash, '[')
+	slice.TFlag = 0
+	slice.Str = resolveReflectName(newName(s, "", false, false))
+	slice.Hash = fnv1(typ.Hash, '[')
 	slice.Elem = typ
-	slice.t.PtrToThis = 0
+	slice.PtrToThis = 0
 
-	ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&slice.Type))
 	return ti.(Type)
 }
 
@@ -2161,7 +2170,7 @@ func StructOf(fields []StructField) Type {
 		}
 		f, fpkgpath := runtimeStructField(field)
 		ft := f.Typ
-		if ft.t.Kind_&kindGCProg != 0 {
+		if ft.Kind_&kindGCProg != 0 {
 			hasGCProg = true
 		}
 		if fpkgpath != "" {
@@ -2176,17 +2185,17 @@ func StructOf(fields []StructField) Type {
 		name := f.Name.Name()
 		hash = fnv1(hash, []byte(name)...)
 		repr = append(repr, (" " + name)...)
-		if f.embedded() {
+		if f.Embedded() {
 			// Embedded field
-			if f.Typ.Kind() == Pointer {
+			if f.Typ.Kind() == abi.Pointer {
 				// Embedded ** and *interface{} are illegal
 				elem := ft.Elem()
-				if k := elem.Kind(); k == Pointer || k == Interface {
-					panic("reflect.StructOf: illegal embedded field type " + ft.String())
+				if k := elem.Kind(); k == abi.Pointer || k == abi.Interface {
+					panic("reflect.StructOf: illegal embedded field type " + stringFor(ft))
 				}
 			}
 
-			switch f.Typ.Kind() {
+			switch Kind(f.Typ.Kind()) {
 			case Interface:
 				ift := (*interfaceType)(unsafe.Pointer(ft))
 				for im, m := range ift.Methods {
@@ -2203,8 +2212,8 @@
 						tfn Value
 					)
 
-					if ft.t.Kind_&kindDirectIface != 0 {
-						tfn = MakeFunc(mtyp, func(in []Value) []Value {
+					if ft.Kind_&kindDirectIface != 0 {
+						tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
 							var args []Value
 							var recv = in[0]
 							if len(in) > 1 {
@@ -2212,7 +2221,7 @@
 							}
 							return recv.Field(ifield).Method(imethod).Call(args)
 						})
-						ifn = MakeFunc(mtyp, func(in []Value) []Value {
+						ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
 							var args []Value
 							var recv = in[0]
 							if len(in) > 1 {
@@ -2221,7 +2230,7 @@
 							return recv.Field(ifield).Method(imethod).Call(args)
 						})
 					} else {
-						tfn = MakeFunc(mtyp, func(in []Value) []Value {
+						tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
 							var args []Value
 							var recv = in[0]
 							if len(in) > 1 {
@@ -2229,7 +2238,7 @@
 							}
 							return recv.Field(ifield).Method(imethod).Call(args)
 						})
-						ifn = MakeFunc(mtyp, func(in []Value) []Value {
+						ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
 							var args []Value
 							var recv = Indirect(in[0])
 							if len(in) > 1 {
@@ -2248,7 +2257,7 @@
 					}
 
 			case Pointer:
 				ptr := (*ptrType)(unsafe.Pointer(ft))
-				if unt := ptr.uncommon(); unt != nil {
+				if unt := ptr.Uncommon(); unt != nil {
 					if i > 0 && unt.Mcount > 0 {
 						// Issue 15924.
 						panic("reflect: embedded type with methods not implemented if type is not first field")
 					}
 					if len(fields) > 1 {
 						panic("reflect: embedded type with methods not implemented if there is more than one field")
 					}
 					for _, m := range unt.Methods() {
-						mname := ptr.nameOff(m.Name)
+						mname := nameOffFor(ft, m.Name)
 						if pkgPath(mname) != "" {
 							// TODO(sbinet).
 							// Issue 15924.
@@ -2265,15 +2274,15 @@
 						}
 						methods = append(methods, abi.Method{
 							Name: resolveReflectName(mname),
-							Mtyp: resolveReflectType(ptr.typeOff(m.Mtyp)),
-							Ifn:  resolveReflectText(ptr.textOff(m.Ifn)),
-							Tfn:  resolveReflectText(ptr.textOff(m.Tfn)),
+							Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
+							Ifn:  resolveReflectText(textOffFor(ft, m.Ifn)),
+							Tfn:  resolveReflectText(textOffFor(ft, m.Tfn)),
 						})
 					}
 				}
-				if unt := ptr.Elem.uncommon(); unt != nil {
+				if unt := ptr.Elem.Uncommon(); unt != nil {
 					for _, m := range unt.Methods() {
-						mname := ptr.nameOff(m.Name)
+						mname := nameOffFor(ft, m.Name)
 						if pkgPath(mname) != "" {
 							// TODO(sbinet)
 							// Issue 15924.
@@ -2281,23 +2290,23 @@
 						}
 						methods = append(methods, abi.Method{
 							Name: resolveReflectName(mname),
-							Mtyp: resolveReflectType(ptr.Elem.typeOff(m.Mtyp)),
-							Ifn:  resolveReflectText(ptr.Elem.textOff(m.Ifn)),
-							Tfn:  resolveReflectText(ptr.Elem.textOff(m.Tfn)),
+							Mtyp: resolveReflectType(typeOffFor(ptr.Elem, m.Mtyp)),
+							Ifn:  resolveReflectText(textOffFor(ptr.Elem, m.Ifn)),
+							Tfn:  resolveReflectText(textOffFor(ptr.Elem, m.Tfn)),
 						})
 					}
 				}
 			default:
-				if unt := ft.uncommon(); unt != nil {
+				if unt := ft.Uncommon(); unt != nil {
 					if i > 0 && unt.Mcount > 0 {
 						// Issue 15924.
 						panic("reflect: embedded type with methods not implemented if type is not first field")
 					}
-					if len(fields) > 1 && ft.t.Kind_&kindDirectIface != 0 {
+					if len(fields) > 1 && ft.Kind_&kindDirectIface != 0 {
 						panic("reflect: embedded type with methods not implemented for non-pointer type")
 					}
 					for _, m := range unt.Methods() {
-						mname := ft.nameOff(m.Name)
+						mname := nameOffFor(ft, m.Name)
 						if pkgPath(mname) != "" {
 							// TODO(sbinet)
 							// Issue 15924.
@@ -2305,9 +2314,9 @@
 						}
 						methods = append(methods, abi.Method{
 							Name: resolveReflectName(mname),
-							Mtyp: resolveReflectType(ft.typeOff(m.Mtyp)),
-							Ifn:  resolveReflectText(ft.textOff(m.Ifn)),
-							Tfn:  resolveReflectText(ft.textOff(m.Tfn)),
+							Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
+							Ifn:  resolveReflectText(textOffFor(ft, m.Ifn)),
+							Tfn:  resolveReflectText(textOffFor(ft, m.Tfn)),
 						})
 					}
 
@@ -2319,9 +2328,9 @@
 		}
 		fset[name] = struct{}{}
 
-		hash = fnv1(hash, byte(ft.t.Hash>>24), byte(ft.t.Hash>>16), byte(ft.t.Hash>>8), byte(ft.t.Hash))
+		hash = fnv1(hash, byte(ft.Hash>>24), byte(ft.Hash>>16), byte(ft.Hash>>8), byte(ft.Hash))
 
-		repr = append(repr, (" " + ft.String())...)
+		repr = append(repr, (" " + stringFor(ft))...)
 		if f.Name.HasTag() {
 			hash = fnv1(hash, []byte(f.Name.Tag())...)
 			repr = append(repr, (" " + strconv.Quote(f.Name.Tag()))...)
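The hash mixing in these hunks is reflect's fnv1 helper, which folds each byte into a 32-bit FNV-1 state. For reference, the shape of the function the calls above resolve to, with a minimal driver added for illustration:

package main

import "fmt"

// fnv1 incorporates the list of bytes into the hash x using the FNV-1
// mixing step: multiply by the 32-bit FNV prime, then xor the byte in.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		x = x*16777619 ^ uint32(b)
	}
	return x
}

func main() {
	h := fnv1(0x811c9dc5, 'm') // mix one byte into the FNV offset basis
	fmt.Printf("%#x\n", h)
}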
@@ -2330,22 +2339,22 @@ func StructOf(fields []StructField) Type {
 			repr = append(repr, ';')
 		}
 
-		comparable = comparable && (ft.t.Equal != nil)
+		comparable = comparable && (ft.Equal != nil)
 
-		offset := align(size, uintptr(ft.t.Align_))
+		offset := align(size, uintptr(ft.Align_))
 		if offset < size {
 			panic("reflect.StructOf: struct size would exceed virtual address space")
 		}
-		if ft.t.Align_ > typalign {
-			typalign = ft.t.Align_
+		if ft.Align_ > typalign {
+			typalign = ft.Align_
 		}
-		size = offset + ft.t.Size_
+		size = offset + ft.Size_
 		if size < offset {
 			panic("reflect.StructOf: struct size would exceed virtual address space")
 		}
 		f.Offset = offset
 
-		if ft.t.Size_ == 0 {
+		if ft.Size_ == 0 {
 			lastzero = size
 		}
 
@@ -2423,8 +2432,8 @@ func StructOf(fields []StructField) Type {
 	if ts, ok := structLookupCache.m.Load(hash); ok {
 		for _, st := range ts.([]Type) {
 			t := st.common()
-			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
-				return t
+			if haveIdenticalUnderlyingType(&typ.Type, t, true) {
+				return toType(t)
 			}
 		}
 	}
@@ -2435,8 +2444,8 @@ func StructOf(fields []StructField) Type {
 	if ts, ok := structLookupCache.m.Load(hash); ok {
 		for _, st := range ts.([]Type) {
 			t := st.common()
-			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
-				return t
+			if haveIdenticalUnderlyingType(&typ.Type, t, true) {
+				return toType(t)
 			}
 		}
 	}
@@ -2452,30 +2461,30 @@ func StructOf(fields []StructField) Type {
 
 	// Look in known types.
 	for _, t := range typesByString(str) {
-		if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+		if haveIdenticalUnderlyingType(&typ.Type, t, true) {
 			// even if 't' wasn't a structType with methods, we should be ok
 			// as the 'u uncommonType' field won't be accessed except when
 			// tflag&abi.TFlagUncommon is set.
-			return addToCache(t)
+			return addToCache(toType(t))
 		}
 	}
 
-	typ.t.Str = resolveReflectName(newName(str, "", false, false))
-	typ.t.TFlag = 0 // TODO: set tflagRegularMemory
-	typ.t.Hash = hash
-	typ.t.Size_ = size
-	typ.t.PtrBytes = typeptrdata(typ.common())
-	typ.t.Align_ = typalign
-	typ.t.FieldAlign_ = typalign
-	typ.t.PtrToThis = 0
+	typ.Str = resolveReflectName(newName(str, "", false, false))
+	typ.TFlag = 0 // TODO: set tflagRegularMemory
+	typ.Hash = hash
+	typ.Size_ = size
+	typ.PtrBytes = typeptrdata(&typ.Type)
+	typ.Align_ = typalign
+	typ.FieldAlign_ = typalign
+	typ.PtrToThis = 0
 	if len(methods) > 0 {
-		typ.t.TFlag |= abi.TFlagUncommon
+		typ.TFlag |= abi.TFlagUncommon
 	}
 
 	if hasGCProg {
 		lastPtrField := 0
 		for i, ft := range fs {
-			if ft.Typ.pointers() {
+			if ft.Typ.Pointers() {
 				lastPtrField = i
 			}
 		}
@@ -2487,7 +2496,7 @@ func StructOf(fields []StructField) Type {
 				// the last field that contains pointer data
 				break
 			}
-			if !ft.Typ.pointers() {
+			if !ft.Typ.Pointers() {
 				// Ignore pointerless fields.
 				continue
 			}
 
 			prog = appendGCProg(prog, ft.Typ)
-			off += ft.Typ.t.PtrBytes
+			off += ft.Typ.PtrBytes
 		}
 		prog = append(prog, 0)
 		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
-		typ.t.Kind_ |= kindGCProg
-		typ.t.GCData = &prog[0]
+		typ.Kind_ |= kindGCProg
+		typ.GCData = &prog[0]
 	} else {
-		typ.t.Kind_ &^= kindGCProg
+		typ.Kind_ &^= kindGCProg
 		bv := new(bitVector)
-		addTypeBits(bv, 0, typ.common())
+		addTypeBits(bv, 0, &typ.Type)
 		if len(bv.data) > 0 {
-			typ.t.GCData = &bv.data[0]
+			typ.GCData = &bv.data[0]
 		}
 	}
-	typ.t.Equal = nil
+	typ.Equal = nil
 	if comparable {
-		typ.t.Equal = func(p, q unsafe.Pointer) bool {
+		typ.Equal = func(p, q unsafe.Pointer) bool {
 			for _, ft := range typ.Fields {
 				pi := add(p, ft.Offset, "&x.field safe")
 				qi := add(q, ft.Offset, "&x.field safe")
-				if !ft.Typ.t.Equal(pi, qi) {
+				if !ft.Typ.Equal(pi, qi) {
 					return false
 				}
 			}
@@ -2534,12 +2543,12 @@ func StructOf(fields []StructField) Type {
 	switch {
 	case len(fs) == 1 && !ifaceIndir(fs[0].Typ):
 		// structs of 1 direct iface type can be direct
-		typ.t.Kind_ |= kindDirectIface
+		typ.Kind_ |= kindDirectIface
 	default:
-		typ.t.Kind_ &^= kindDirectIface
+		typ.Kind_ &^= kindDirectIface
 	}
 
-	return addToCache(&typ.rtype)
+	return addToCache(toType(&typ.Type))
 }
 
 // runtimeStructField takes a StructField value passed to StructOf and
@@ -2571,15 +2580,15 @@ func runtimeStructField(field StructField) (structField, string) {
 // typeptrdata returns the length in bytes of the prefix of t
 // containing pointer data. Anything after this offset is scalar data.
 // keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
-func typeptrdata(t *rtype) uintptr {
+func typeptrdata(t *abi.Type) uintptr {
 	switch t.Kind() {
-	case Struct:
+	case abi.Struct:
 		st := (*structType)(unsafe.Pointer(t))
 		// find the last field that has pointers.
 		field := -1
 		for i := range st.Fields {
 			ft := st.Fields[i].Typ
-			if ft.pointers() {
+			if ft.Pointers() {
 				field = i
 			}
 		}
@@ -2587,10 +2596,10 @@ func typeptrdata(t *rtype) uintptr {
 			return 0
 		}
 		f := st.Fields[field]
-		return f.Offset + f.Typ.t.PtrBytes
+		return f.Offset + f.Typ.PtrBytes
 
 	default:
-		panic("reflect.typeptrdata: unexpected type, " + t.String())
+		panic("reflect.typeptrdata: unexpected type, " + stringFor(t))
 	}
 }
 
@@ -2607,7 +2616,7 @@ func ArrayOf(length int, elem Type) Type {
 		panic("reflect: negative length passed to ArrayOf")
 	}
 
-	typ := elem.(*rtype)
+	typ := elem.common()
 
 	// Look in cache.
 	ckey := cacheKey{Array, typ, nil, uintptr(length)}
@@ -2616,11 +2625,11 @@
 	}
 
 	// Look in known types.
-	s := "[" + strconv.Itoa(length) + "]" + typ.String()
+	s := "[" + strconv.Itoa(length) + "]" + stringFor(typ)
 	for _, tt := range typesByString(s) {
 		array := (*arrayType)(unsafe.Pointer(tt))
-		if toRType(array.Elem) == typ {
-			ti, _ := lookupCache.LoadOrStore(ckey, tt)
+		if array.Elem == typ {
+			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
 			return ti.(Type)
 		}
 	}
@@ -2629,43 +2638,43 @@ func ArrayOf(length int, elem Type) Type {
 	var iarray any = [1]unsafe.Pointer{}
 	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
 	array := *prototype
-	array.TFlag = typ.t.TFlag & abi.TFlagRegularMemory
+	array.TFlag = typ.TFlag & abi.TFlagRegularMemory
 	array.Str = resolveReflectName(newName(s, "", false, false))
-	array.Hash = fnv1(typ.t.Hash, '[')
+	array.Hash = fnv1(typ.Hash, '[')
 	for n := uint32(length); n > 0; n >>= 8 {
 		array.Hash = fnv1(array.Hash, byte(n))
 	}
 	array.Hash = fnv1(array.Hash, ']')
-	array.Elem = &(typ.t)
+	array.Elem = typ
 	array.PtrToThis = 0
-	if typ.t.Size_ > 0 {
-		max := ^uintptr(0) / typ.t.Size_
+	if typ.Size_ > 0 {
+		max := ^uintptr(0) / typ.Size_
 		if uintptr(length) > max {
 			panic("reflect.ArrayOf: array size would exceed virtual address space")
 		}
 	}
-	array.Size_ = typ.t.Size_ * uintptr(length)
-	if length > 0 && typ.t.PtrBytes != 0 {
-		array.PtrBytes = typ.t.Size_*uintptr(length-1) + typ.t.PtrBytes
+	array.Size_ = typ.Size_ * uintptr(length)
+	if length > 0 && typ.PtrBytes != 0 {
+		array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
 	}
-	array.Align_ = typ.t.Align_
-	array.FieldAlign_ = typ.t.FieldAlign_
+	array.Align_ = typ.Align_
+	array.FieldAlign_ = typ.FieldAlign_
 	array.Len = uintptr(length)
 	array.Slice = &(SliceOf(elem).(*rtype).t)
 
 	switch {
-	case typ.t.PtrBytes == 0 || array.Size_ == 0:
+	case typ.PtrBytes == 0 || array.Size_ == 0:
 		// No pointers.
 		array.GCData = nil
 		array.PtrBytes = 0
 
 	case length == 1:
 		// In memory, 1-element array looks just like the element.
-		array.Kind_ |= typ.t.Kind_ & kindGCProg
-		array.GCData = typ.t.GCData
-		array.PtrBytes = typ.t.PtrBytes
+		array.Kind_ |= typ.Kind_ & kindGCProg
+		array.GCData = typ.GCData
+		array.PtrBytes = typ.PtrBytes
 
-	case typ.t.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize:
+	case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize:
 		// Element is small with pointer mask; array is still small.
 		// Create direct pointer mask by turning each 1 bit in elem
 		// into length 1 bits in larger mask.
@@ -2682,8 +2691,8 @@ func ArrayOf(length int, elem Type) Type {
 		prog := []byte{0, 0, 0, 0} // will be length of prog
 		prog = appendGCProg(prog, typ)
 		// Pad from ptrdata to size.
-		elemPtrs := typ.t.PtrBytes / goarch.PtrSize
-		elemWords := typ.t.Size_ / goarch.PtrSize
+		elemPtrs := typ.PtrBytes / goarch.PtrSize
+		elemWords := typ.Size_ / goarch.PtrSize
 		if elemPtrs < elemWords {
 			// Emit literal 0 bit, then repeat as needed.
 			prog = append(prog, 0x01, 0x00)
@@ -2707,11 +2716,11 @@
 		array.PtrBytes = array.Size_ // overestimate but ok; must match program
 	}
 
-	etyp := typ.common()
+	etyp := typ
 	esize := etyp.Size()
 
 	array.Equal = nil
-	if eequal := etyp.t.Equal; eequal != nil {
+	if eequal := etyp.Equal; eequal != nil {
 		array.Equal = func(p, q unsafe.Pointer) bool {
 			for i := 0; i < length; i++ {
 				pi := arrayAt(p, i, esize, "i < length")
@@ -2750,20 +2759,20 @@ func appendVarint(x []byte, v uintptr) []byte {
 // a nil *rtype must be replaced by a nil Type, but in gccgo this
 // function takes care of ensuring that multiple *rtype for the same
 // type are coalesced into a single Type.
-func toType(t *rtype) Type {
+func toType(t *abi.Type) Type {
 	if t == nil {
 		return nil
 	}
-	return t
+	return toRType(t)
 }
 
 type layoutKey struct {
 	ftyp *funcType // function signature
-	rcvr *rtype    // receiver type, or nil if none
+	rcvr *abi.Type // receiver type, or nil if none
 }
 
 type layoutType struct {
-	t         *rtype
+	t         *abi.Type
 	framePool *sync.Pool
 	abid      abiDesc
 }
 
@@ -2777,12 +2786,12 @@ var layoutCache sync.Map // map[layoutKey]layoutType
 // The returned type exists only for GC, so we only fill out GC relevant info.
 // Currently, that's just size and the GC program. We also fill in
 // the name for possible debugging use.
-func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abid abiDesc) {
+func funcLayout(t *funcType, rcvr *abi.Type) (frametype *abi.Type, framePool *sync.Pool, abid abiDesc) {
 	if t.Kind() != abi.Func {
 		panic("reflect: funcLayout of non-func type " + stringFor(&t.Type))
 	}
-	if rcvr != nil && rcvr.Kind() == Interface {
-		panic("reflect: funcLayout with interface receiver " + rcvr.String())
+	if rcvr != nil && rcvr.Kind() == abi.Interface {
+		panic("reflect: funcLayout with interface receiver " + stringFor(rcvr))
 	}
 	k := layoutKey{t, rcvr}
 	if lti, ok := layoutCache.Load(k); ok {
@@ -2794,7 +2803,7 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Poo
 	abid = newAbiDesc(t, rcvr)
 
 	// build dummy rtype holding gc program
-	x := &rtype{abi.Type{
+	x := &abi.Type{
 		Align_: goarch.PtrSize,
 		// Don't add spill space here; it's only necessary in
 		// reflectcall's frame, not in the allocated frame.
@@ -2802,18 +2811,18 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Poo
 		// spill space in the frame is no longer required.
 		Size_:    align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
 		PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
-	}}
+	}
 	if abid.stackPtrs.n > 0 {
-		x.t.GCData = &abid.stackPtrs.data[0]
+		x.GCData = &abid.stackPtrs.data[0]
 	}
 
 	var s string
 	if rcvr != nil {
-		s = "methodargs(" + rcvr.String() + ")(" + stringFor(&t.Type) + ")"
+		s = "methodargs(" + stringFor(rcvr) + ")(" + stringFor(&t.Type) + ")"
 	} else {
 		s = "funcargs(" + stringFor(&t.Type) + ")"
 	}
-	x.t.Str = resolveReflectName(newName(s, "", false, false))
+	x.Str = resolveReflectName(newName(s, "", false, false))
 
 	// cache result for future callers
 	framePool = &sync.Pool{New: func() any {
@@ -2829,8 +2838,8 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Poo
 }
 
 // ifaceIndir reports whether t is stored indirectly in an interface value.
-func ifaceIndir(t *rtype) bool {
-	return t.t.Kind_&kindDirectIface == 0
+func ifaceIndir(t *abi.Type) bool {
+	return t.Kind_&kindDirectIface == 0
 }
 
 // Note: this type must agree with runtime.bitvector.
@@ -2853,12 +2862,12 @@ func (bv *bitVector) append(bit uint8) { bv.n++ } -func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { - if t.t.PtrBytes == 0 { +func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) { + if t.PtrBytes == 0 { return } - switch Kind(t.t.Kind_ & kindMask) { + switch Kind(t.Kind_ & kindMask) { case Chan, Func, Map, Pointer, Slice, String, UnsafePointer: // 1 pointer at start of representation for bv.n < uint32(offset/uintptr(goarch.PtrSize)) { @@ -2878,7 +2887,7 @@ func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { // repeat inner type tt := (*arrayType)(unsafe.Pointer(t)) for i := 0; i < int(tt.Len); i++ { - addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, toRType(tt.Elem)) + addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem) } case Struct: diff --git a/src/reflect/value.go b/src/reflect/value.go index c46d3865dac..f079b8228bd 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -38,7 +38,7 @@ import ( // they represent. type Value struct { // typ holds the type of the value represented by a Value. - typ *rtype + typ *abi.Type // Pointer-valued data or, if flagIndir is set, pointer to data. // Valid when either flagIndir is set or typ.pointers() is true. @@ -96,7 +96,7 @@ func (f flag) ro() flag { // v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer // if v.Kind() == Pointer, the base type must not be not-in-heap. func (v Value) pointer() unsafe.Pointer { - if v.typ.Size() != goarch.PtrSize || !v.typ.pointers() { + if v.typ.Size() != goarch.PtrSize || !v.typ.Pointers() { panic("can't call pointer on a non-pointer Value") } if v.flag&flagIndir != 0 { @@ -112,7 +112,7 @@ func packEface(v Value) any { e := (*emptyInterface)(unsafe.Pointer(&i)) // First, fill in the data portion of the interface. switch { - case ifaceIndir(t): + case t.IfaceIndir(): if v.flag&flagIndir == 0 { panic("bad indir") } @@ -151,7 +151,7 @@ func unpackEface(i any) Value { return Value{} } f := flag(t.Kind()) - if ifaceIndir(t) { + if t.IfaceIndir() { f |= flagIndir } return Value{t, e.word, f} @@ -194,7 +194,7 @@ func valueMethodName() string { // emptyInterface is the header for an interface{} value. type emptyInterface struct { - typ *rtype + typ *abi.Type word unsafe.Pointer } @@ -202,9 +202,9 @@ type emptyInterface struct { type nonEmptyInterface struct { // see ../runtime/iface.go:/Itab itab *struct { - ityp *rtype // static interface type - typ *rtype // dynamic concrete type - hash uint32 // copy of typ.hash + ityp *abi.Type // static interface type + typ *abi.Type // dynamic concrete type + hash uint32 // copy of typ.hash _ [4]byte fun [100000]unsafe.Pointer // method table } @@ -275,7 +275,7 @@ func (v Value) Addr() Value { // Preserve flagRO instead of using v.flag.ro() so that // v.Addr().Elem() is equivalent to v (#32772) fl := v.flag & flagRO - return Value{v.typ.ptrTo(), v.ptr, fl | flag(Pointer)} + return Value{ptrTo(v.typ), v.ptr, fl | flag(Pointer)} } // Bool returns v's underlying value. @@ -308,13 +308,13 @@ func (v Value) Bytes() []byte { func (v Value) bytesSlow() []byte { switch v.kind() { case Slice: - if v.typ.Elem().Kind() != Uint8 { + if v.typ.Elem().Kind() != abi.Uint8 { panic("reflect.Value.Bytes of non-byte slice") } // Slice is always bigger than a word; assume flagIndir. 
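// "Slice is always bigger than a word; assume flagIndir" relies on the same
// rule packEface and unpackEface use above: only pointer-shaped types live
// directly in the interface data word; everything else sits behind a pointer.
// A sketch that makes this visible; the eface mirror below assumes the
// runtime's two-word empty-interface layout and is not public API:

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the empty-interface header (cf. emptyInterface above).
type eface struct {
	typ  unsafe.Pointer
	data unsafe.Pointer
}

func main() {
	n := 42
	var direct any = &n // pointer-shaped: the data word is &n itself
	fmt.Println((*eface)(unsafe.Pointer(&direct)).data == unsafe.Pointer(&n)) // true

	var indirect any = [2]int{1, 2} // larger than a word: data points at a copy
	fmt.Println((*eface)(unsafe.Pointer(&indirect)).data != unsafe.Pointer(&n)) // true
}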
return *(*[]byte)(v.ptr) case Array: - if v.typ.Elem().Kind() != Uint8 { + if v.typ.Elem().Kind() != abi.Uint8 { panic("reflect.Value.Bytes of non-byte array") } if !v.CanAddr() { @@ -331,7 +331,7 @@ func (v Value) bytesSlow() []byte { // It panics if v's underlying value is not a slice of runes (int32s). func (v Value) runes() []rune { v.mustBe(Slice) - if v.typ.Elem().Kind() != Int32 { + if v.typ.Elem().Kind() != abi.Int32 { panic("reflect.Value.Bytes of non-rune slice") } // Slice is always bigger than a word; assume flagIndir. @@ -393,7 +393,7 @@ func (v Value) call(op string, in []Value) []Value { var ( fn unsafe.Pointer rcvr Value - rcvrtype *rtype + rcvrtype *abi.Type ) if v.flag&flagMethod != 0 { rcvr = v @@ -521,7 +521,7 @@ func (v Value) call(op string, in []Value) []Value { // TODO(mknyszek): Figure out if it's possible to get some // scratch space for this assignment check. Previously, it // was possible to use space in the argument frame. - v = v.assignTo("reflect.Value.Call", targ, nil) + v = v.assignTo("reflect.Value.Call", &targ.t, nil) stepsLoop: for _, st := range abid.call.stepsForValue(i + inStart) { switch st.kind { @@ -529,7 +529,7 @@ func (v Value) call(op string, in []Value) []Value { // Copy values to the "stack." addr := add(stackArgs, st.stkOff, "precomputed stack arg offset") if v.flag&flagIndir != 0 { - typedmemmove(targ, addr, v.ptr) + typedmemmove(&targ.t, addr, v.ptr) } else { *(*unsafe.Pointer)(addr) = v.ptr } @@ -620,7 +620,7 @@ func (v Value) call(op string, in []Value) []Value { // allocated, the entire value is according to the ABI. So // just make an indirection into the allocated frame. fl := flagIndir | flag(tv.Kind()) - ret[i] = Value{toRType(tv), add(stackArgs, st.stkOff, "tv.Size() != 0"), fl} + ret[i] = Value{tv, add(stackArgs, st.stkOff, "tv.Size() != 0"), fl} // Note: this does introduce false sharing between results - // if any result is live, they are all live. // (And the space for the args is live as well, but as we've @@ -629,14 +629,14 @@ func (v Value) call(op string, in []Value) []Value { } // Handle pointers passed in registers. - if !ifaceIndir(toRType(tv)) { + if !ifaceIndir(tv) { // Pointer-valued data gets put directly // into v.ptr. if steps[0].kind != abiStepPointer { print("kind=", steps[0].kind, ", type=", stringFor(tv), "\n") panic("mismatch between ABI description and types") } - ret[i] = Value{toRType(tv), regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())} + ret[i] = Value{tv, regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())} continue } @@ -649,7 +649,7 @@ func (v Value) call(op string, in []Value) []Value { // additional space to the allocated stack frame and storing the // register-allocated return values into the allocated stack frame and // referring there in the resulting Value. 
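// For orientation: this result-assembly logic is what backs the public
// Value.Call. Each returned Value below was produced by one of the three
// paths in this hunk (stack offset, pointer register, or a freshly allocated
// frame that register contents are spilled into):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	f := reflect.ValueOf(func(a, b int) (int, error) { return a + b, nil })
	out := f.Call([]reflect.Value{reflect.ValueOf(1), reflect.ValueOf(2)})
	fmt.Println(out[0].Int(), out[1].IsNil()) // 3 true
}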
- s := unsafe_New(toRType(tv)) + s := unsafe_New(tv) for _, st := range steps { switch st.kind { case abiStepIntReg: @@ -667,7 +667,7 @@ func (v Value) call(op string, in []Value) []Value { panic("unknown ABI part kind") } } - ret[i] = Value{toRType(tv), s, flagIndir | flag(tv.Kind())} + ret[i] = Value{tv, s, flagIndir | flag(tv.Kind())} } } @@ -712,9 +712,8 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs ptr := frame in := make([]Value, 0, int(ftyp.InCount)) for i, typ := range ftyp.InSlice() { - typ := toRType(typ) // FIXME cleanup this loop body if typ.Size() == 0 { - in = append(in, Zero(typ)) + in = append(in, Zero(toRType(typ))) continue } v := Value{typ, nil, flag(typ.Kind())} @@ -760,7 +759,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs // Pointer-valued data gets put directly // into v.ptr. if steps[0].kind != abiStepPointer { - print("kind=", steps[0].kind, ", type=", typ.String(), "\n") + print("kind=", steps[0].kind, ", type=", stringFor(typ), "\n") panic("mismatch between ABI description and types") } v.ptr = regs.Ptrs[steps[0].ireg] @@ -806,7 +805,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs // We must clear the destination before calling assignTo, // in case assignTo writes (with memory barriers) to the // target location used as scratch space. See issue 39541. - v = v.assignTo("reflect.MakeFunc", toRType(typ), nil) + v = v.assignTo("reflect.MakeFunc", typ, nil) stepsLoop: for _, st := range abid.ret.stepsForValue(i) { switch st.kind { @@ -875,9 +874,9 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs // The return value rcvrtype gives the method's actual receiver type. // The return value t gives the method type signature (without the receiver). // The return value fn is a pointer to the method code. -func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *funcType, fn unsafe.Pointer) { +func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *abi.Type, t *funcType, fn unsafe.Pointer) { i := methodIndex - if v.typ.Kind() == Interface { + if v.typ.Kind() == abi.Interface { tt := (*interfaceType)(unsafe.Pointer(v.typ)) if uint(i) >= uint(len(tt.Methods)) { panic("reflect: internal error: invalid method index") @@ -895,17 +894,17 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *fu t = (*funcType)(unsafe.Pointer(tt.typeOff(m.Typ))) } else { rcvrtype = v.typ - ms := v.typ.exportedMethods() + ms := v.typ.ExportedMethods() if uint(i) >= uint(len(ms)) { panic("reflect: internal error: invalid method index") } m := ms[i] - if !v.typ.nameOff(m.Name).IsExported() { + if !nameOffFor(v.typ, m.Name).IsExported() { panic("reflect: " + op + " of unexported method") } - ifn := v.typ.textOff(m.Ifn) + ifn := textOffFor(v.typ, m.Ifn) fn = unsafe.Pointer(&ifn) - t = (*funcType)(unsafe.Pointer(v.typ.typeOff(m.Mtyp))) + t = (*funcType)(unsafe.Pointer(typeOffFor(v.typ, m.Mtyp))) } return } @@ -916,7 +915,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *fu // methods, which always uses one word to record the receiver. 
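// methodReceiver above resolves a method index against either the itab (for
// interface receivers) or the concrete type's exported-method table; both
// paths are reachable from the public API:

package main

import (
	"fmt"
	"reflect"
)

type T struct{ n int }

func (t T) N() int { return t.n }

func main() {
	// Concrete type: resolved via the exported-method table.
	fmt.Println(reflect.ValueOf(T{7}).Method(0).Call(nil)[0].Int()) // 7

	// Interface: resolved via the interface type's method list.
	var err error = fmt.Errorf("boom")
	ev := reflect.ValueOf(&err).Elem()              // Kind() == Interface
	fmt.Println(ev.Method(0).Call(nil)[0].String()) // boom
}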
func storeRcvr(v Value, p unsafe.Pointer) { t := v.typ - if t.Kind() == Interface { + if t.Kind() == abi.Interface { // the interface data word becomes the receiver word iface := (*nonEmptyInterface)(v.ptr) *(*unsafe.Pointer)(p) = iface.word @@ -1021,7 +1020,7 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *a if vStep.size != mStep.size { panic("method ABI and value ABI do not align") } - typedmemmove(toRType(t), + typedmemmove(t, add(methodFrame, mStep.stkOff, "precomputed stack offset"), add(valueFrame, vStep.stkOff, "precomputed stack offset")) continue @@ -1169,7 +1168,7 @@ func (v Value) capNonSlice() int { case Chan: return chancap(v.pointer()) case Ptr: - if v.typ.Elem().Kind() == Array { + if v.typ.Elem().Kind() == abi.Array { return v.typ.Elem().Len() } panic("reflect: call of reflect.Value.Cap on ptr to non-array Value") @@ -1279,7 +1278,7 @@ func (v Value) Field(i int) Value { fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind()) // Using an unexported field forces flagRO. if !field.Name.IsExported() { - if field.embedded() { + if field.Embedded() { fl |= flagEmbedRO } else { fl |= flagStickyRO @@ -1304,7 +1303,7 @@ func (v Value) FieldByIndex(index []int) Value { v.mustBe(Struct) for i, x := range index { if i > 0 { - if v.Kind() == Pointer && v.typ.Elem().Kind() == Struct { + if v.Kind() == Pointer && v.typ.Elem().Kind() == abi.Struct { if v.IsNil() { panic("reflect: indirection through nil pointer to embedded struct") } @@ -1327,9 +1326,9 @@ func (v Value) FieldByIndexErr(index []int) (Value, error) { v.mustBe(Struct) for i, x := range index { if i > 0 { - if v.Kind() == Ptr && v.typ.Elem().Kind() == Struct { + if v.Kind() == Ptr && v.typ.Elem().Kind() == abi.Struct { if v.IsNil() { - return Value{}, errors.New("reflect: indirection through nil pointer to embedded struct field " + v.typ.Elem().Name()) + return Value{}, errors.New("reflect: indirection through nil pointer to embedded struct field " + nameFor(v.typ.Elem())) } v = v.Elem() } @@ -1344,7 +1343,7 @@ func (v Value) FieldByIndexErr(index []int) (Value, error) { // It panics if v's Kind is not struct. func (v Value) FieldByName(name string) Value { v.mustBe(Struct) - if f, ok := v.typ.FieldByName(name); ok { + if f, ok := toRType(v.typ).FieldByName(name); ok { return v.FieldByIndex(f.Index) } return Value{} @@ -1355,7 +1354,7 @@ func (v Value) FieldByName(name string) Value { // It panics if v's Kind is not struct. // It returns the zero Value if no field was found. func (v Value) FieldByNameFunc(match func(string) bool) Value { - if f, ok := v.typ.FieldByNameFunc(match); ok { + if f, ok := toRType(v.typ).FieldByNameFunc(match); ok { return v.FieldByIndex(f.Index) } return Value{} @@ -1395,7 +1394,7 @@ func (v Value) Index(i int) Value { if uint(i) >= uint(tt.Len) { panic("reflect: array index out of range") } - typ := toRType(tt.Elem) + typ := tt.Elem offset := uintptr(i) * typ.Size() // Either flagIndir is set and v.ptr points at array, @@ -1583,11 +1582,11 @@ func (v Value) IsZero() bool { return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 case Array: // If the type is comparable, then compare directly with zero. 
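// The fast path below fires only when the type is comparable (Equal != nil)
// and fits in the shared zero buffer; otherwise IsZero falls back to walking
// elements or fields. Both routes agree, as a quick sketch shows:

package main

import (
	"fmt"
	"reflect"
)

type comparableS struct{ a, b int } // comparable: one Equal call against zeroVal
type walkedS struct{ f func() }     // func field: not comparable, fields are walked

func main() {
	fmt.Println(reflect.ValueOf(comparableS{}).IsZero()) // true
	fmt.Println(reflect.ValueOf(walkedS{}).IsZero())     // true
}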
- if v.typ.t.Equal != nil && v.typ.Size() <= maxZero { + if v.typ.Equal != nil && v.typ.Size() <= maxZero { if v.flag&flagIndir == 0 { return v.ptr == nil } - return v.typ.t.Equal(v.ptr, unsafe.Pointer(&zeroVal[0])) + return v.typ.Equal(v.ptr, unsafe.Pointer(&zeroVal[0])) } n := v.Len() @@ -1603,11 +1602,11 @@ func (v Value) IsZero() bool { return v.Len() == 0 case Struct: // If the type is comparable, then compare directly with zero. - if v.typ.t.Equal != nil && v.typ.Size() <= maxZero { + if v.typ.Equal != nil && v.typ.Size() <= maxZero { if v.flag&flagIndir == 0 { return v.ptr == nil } - return v.typ.t.Equal(v.ptr, unsafe.Pointer(&zeroVal[0])) + return v.typ.Equal(v.ptr, unsafe.Pointer(&zeroVal[0])) } n := v.NumField() @@ -1707,7 +1706,7 @@ func (v Value) lenNonSlice() int { // String is bigger than a word; assume flagIndir. return (*unsafeheader.String)(v.ptr).Len case Ptr: - if v.typ.Elem().Kind() == Array { + if v.typ.Elem().Kind() == abi.Array { return v.typ.Elem().Len() } panic("reflect: call of reflect.Value.Len on ptr to non-array Value") @@ -1971,8 +1970,8 @@ func (f flag) panicNotMap() { // copyVal returns a Value containing the map key or value at ptr, // allocating a new variable as needed. -func copyVal(typ *rtype, fl flag, ptr unsafe.Pointer) Value { - if ifaceIndir(typ) { +func copyVal(typ *abi.Type, fl flag, ptr unsafe.Pointer) Value { + if typ.IfaceIndir() { // Copy result so future changes to the map // won't change the underlying value. c := unsafe_New(typ) @@ -1990,10 +1989,10 @@ func (v Value) Method(i int) Value { if v.typ == nil { panic(&ValueError{"reflect.Value.Method", Invalid}) } - if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) { + if v.flag&flagMethod != 0 || uint(i) >= uint(toRType(v.typ).NumMethod()) { panic("reflect: Method index out of range") } - if v.typ.Kind() == Interface && v.IsNil() { + if v.typ.Kind() == abi.Interface && v.IsNil() { panic("reflect: Method on nil interface value") } fl := v.flag.ro() | (v.flag & flagIndir) @@ -2014,7 +2013,7 @@ func (v Value) NumMethod() int { if v.flag&flagMethod != 0 { return 0 } - return v.typ.NumMethod() + return toRType(v.typ).NumMethod() } // MethodByName returns a function value corresponding to the method @@ -2029,7 +2028,7 @@ func (v Value) MethodByName(name string) Value { if v.flag&flagMethod != 0 { return Value{} } - m, ok := v.typ.MethodByName(name) + m, ok := toRType(v.typ).MethodByName(name) if !ok { return Value{} } @@ -2125,7 +2124,7 @@ func (v Value) Pointer() uintptr { k := v.kind() switch k { case Pointer: - if v.typ.t.PtrBytes == 0 { + if v.typ.PtrBytes == 0 { val := *(*uintptr)(v.ptr) // Since it is a not-in-heap pointer, all pointers to the heap are // forbidden! See comment in Value.Elem and issue #48399. 
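// For the ordinary in-heap case of the Pointer hunk above (PtrBytes != 0),
// the returned uintptr is simply the pointer's address; a small sanity check:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	x := 1
	v := reflect.ValueOf(&x)
	fmt.Println(v.Pointer() == uintptr(unsafe.Pointer(&x))) // true
}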
@@ -2180,11 +2179,10 @@ func (v Value) recv(nb bool) (val Value, ok bool) { panic("reflect: recv on send-only channel") } t := tt.Elem - rt := toRType(t) - val = Value{rt, nil, flag(t.Kind())} + val = Value{t, nil, flag(t.Kind())} var p unsafe.Pointer - if ifaceIndir(rt) { - p = unsafe_New(rt) + if ifaceIndir(t) { + p = unsafe_New(t) val.ptr = p val.flag |= flagIndir } else { @@ -2214,7 +2212,7 @@ func (v Value) send(x Value, nb bool) (selected bool) { panic("reflect: send on recv-only channel") } x.mustBeExported() - x = x.assignTo("reflect.Value.Send", toRType(tt.Elem), nil) + x = x.assignTo("reflect.Value.Send", tt.Elem, nil) var p unsafe.Pointer if x.flag&flagIndir != 0 { p = x.ptr @@ -2260,7 +2258,7 @@ func (v Value) SetBool(x bool) { func (v Value) SetBytes(x []byte) { v.mustBeAssignable() v.mustBe(Slice) - if v.typ.Elem().Kind() != Uint8 { + if toRType(v.typ).Elem().Kind() != Uint8 { // TODO add Elem method, fix mustBe(Slice) to return slice. panic("reflect.Value.SetBytes of non-byte slice") } *(*[]byte)(v.ptr) = x @@ -2271,7 +2269,7 @@ func (v Value) SetBytes(x []byte) { func (v Value) setRunes(x []rune) { v.mustBeAssignable() v.mustBe(Slice) - if v.typ.Elem().Kind() != Int32 { + if v.typ.Elem().Kind() != abi.Int32 { panic("reflect.Value.setRunes of non-rune slice") } *(*[]rune)(v.ptr) = x @@ -2500,7 +2498,7 @@ func (v Value) Slice(i, j int) Value { } fl := v.flag.ro() | flagIndir | flag(Slice) - return Value{typ.common(), unsafe.Pointer(&x), fl} + return Value{typ.Common(), unsafe.Pointer(&x), fl} } // Slice3 is the 3-index form of the slice operation: it returns v[i:j:k]. @@ -2552,7 +2550,7 @@ func (v Value) Slice3(i, j, k int) Value { } fl := v.flag.ro() | flagIndir | flag(Slice) - return Value{typ.common(), unsafe.Pointer(&x), fl} + return Value{typ.Common(), unsafe.Pointer(&x), fl} } // String returns the string v's underlying value, as a string. @@ -2602,7 +2600,7 @@ func (v Value) TrySend(x Value) bool { // Type returns v's type. func (v Value) Type() Type { if v.flag != 0 && v.flag&flagMethod == 0 { - return v.typ + return (*rtype)(unsafe.Pointer(v.typ)) // inline of toRType(v.typ), for own inlining in inline test } return v.typeSlow() } @@ -2612,28 +2610,28 @@ func (v Value) typeSlow() Type { panic(&ValueError{"reflect.Value.Type", Invalid}) } if v.flag&flagMethod == 0 { - return v.typ + return toRType(v.typ) } // Method value. // v.typ describes the receiver, not the method type. i := int(v.flag) >> flagMethodShift - if v.typ.Kind() == Interface { + if v.typ.Kind() == abi.Interface { // Method on interface. tt := (*interfaceType)(unsafe.Pointer(v.typ)) if uint(i) >= uint(len(tt.Methods)) { panic("reflect: internal error: invalid method index") } m := &tt.Methods[i] - return v.typ.typeOff(m.Typ) + return toRType(typeOffFor(v.typ, m.Typ)) } // Method on concrete type. - ms := v.typ.exportedMethods() + ms := v.typ.ExportedMethods() if uint(i) >= uint(len(ms)) { panic("reflect: internal error: invalid method index") } m := ms[i] - return v.typ.typeOff(m.Mtyp) + return toRType(typeOffFor(v.typ, m.Mtyp)) } // CanUint reports whether Uint can be used without panicking. @@ -2702,7 +2700,7 @@ func (v Value) UnsafePointer() unsafe.Pointer { k := v.kind() switch k { case Pointer: - if v.typ.t.PtrBytes == 0 { + if v.typ.PtrBytes == 0 { // Since it is a not-in-heap pointer, all pointers to the heap are // forbidden! See comment in Value.Elem and issue #48399. 
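// The recv/send rewrites at the top of this hunk back Value.Send and
// Value.Recv; the element type now flows through as *abi.Type with no toRType
// round trip. Public behavior is unchanged:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	ch := make(chan string, 1)
	v := reflect.ValueOf(ch)
	v.Send(reflect.ValueOf("hi")) // x.assignTo("reflect.Value.Send", tt.Elem, nil)
	s, ok := v.Recv()             // val allocated via unsafe_New(t): string is indirect
	fmt.Println(s.String(), ok)   // hi true
}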
if !verifyNotInHeapPtr(*(*uintptr)(v.ptr)) { @@ -2803,7 +2801,7 @@ func (v Value) grow(n int) { case p.Len+n < 0: panic("reflect.Value.Grow: slice overflow") case p.Len+n > p.Cap: - t := v.typ.Elem().(*rtype) + t := v.typ.Elem() *p = growslice(t, *p, n) } } @@ -2890,7 +2888,7 @@ func Copy(dst, src Value) int { sk := src.kind() var stringCopy bool if sk != Array && sk != Slice { - stringCopy = sk == String && dst.typ.Elem().Kind() == Uint8 + stringCopy = sk == String && dst.typ.Elem().Kind() == abi.Uint8 if !stringCopy { panic(&ValueError{"reflect.Copy", sk}) } @@ -2900,7 +2898,7 @@ func Copy(dst, src Value) int { de := dst.typ.Elem() if !stringCopy { se := src.typ.Elem() - typesMustMatch("reflect.Copy", de, se) + typesMustMatch("reflect.Copy", toType(de), toType(se)) } var ds, ss unsafeheader.Slice @@ -2924,7 +2922,7 @@ func Copy(dst, src Value) int { ss.Cap = sh.Len } - return typedslicecopy(de.common(), ds, ss) + return typedslicecopy(de.Common(), ds, ss) } // A runtimeSelect is a single case passed to rselect. @@ -3040,7 +3038,7 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) { panic("reflect.Select: SendDir case missing Send value") } v.mustBeExported() - v = v.assignTo("reflect.Select", toRType(tt.Elem), nil) + v = v.assignTo("reflect.Select", tt.Elem, nil) if v.flag&flagIndir != 0 { rc.val = v.ptr } else { @@ -3063,7 +3061,7 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) { } rc.ch = ch.pointer() rc.typ = toRType(&tt.Type) - rc.val = unsafe_New(toRType(tt.Elem)) + rc.val = unsafe_New(tt.Elem) } } @@ -3071,13 +3069,12 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) { if runcases[chosen].dir == SelectRecv { tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ)) t := tt.Elem - rt := toRType(t) p := runcases[chosen].val fl := flag(t.Kind()) - if ifaceIndir(rt) { - recv = Value{rt, p, fl | flagIndir} + if t.IfaceIndir() { + recv = Value{t, p, fl | flagIndir} } else { - recv = Value{rt, *(*unsafe.Pointer)(p), fl} + recv = Value{t, *(*unsafe.Pointer)(p), fl} } } return chosen, recv, recvOK @@ -3088,8 +3085,8 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) { */ // implemented in package runtime -func unsafe_New(*rtype) unsafe.Pointer -func unsafe_NewArray(*rtype, int) unsafe.Pointer +func unsafe_New(*abi.Type) unsafe.Pointer +func unsafe_NewArray(*abi.Type, int) unsafe.Pointer // MakeSlice creates a new zero-initialized slice value // for the specified slice type, length, and capacity. @@ -3107,8 +3104,8 @@ func MakeSlice(typ Type, len, cap int) Value { panic("reflect.MakeSlice: len > cap") } - s := unsafeheader.Slice{Data: unsafe_NewArray(typ.Elem().(*rtype), cap), Len: len, Cap: cap} - return Value{typ.(*rtype), unsafe.Pointer(&s), flagIndir | flag(Slice)} + s := unsafeheader.Slice{Data: unsafe_NewArray(&(typ.Elem().(*rtype).t), cap), Len: len, Cap: cap} + return Value{&typ.(*rtype).t, unsafe.Pointer(&s), flagIndir | flag(Slice)} } // MakeChan creates a new channel with the specified type and buffer size. 
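// MakeSlice just above now passes the element's *abi.Type straight to
// unsafe_NewArray; the public contract is unchanged:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	st := reflect.TypeOf([]int(nil))
	s := reflect.MakeSlice(st, 2, 4) // unsafe_NewArray(elem, cap), then a slice header
	s.Index(0).SetInt(7)             // slice elements are addressable, so SetInt works
	fmt.Println(s.Len(), s.Cap(), s.Index(0).Int()) // 2 4 7
}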
@@ -3122,7 +3119,7 @@ func MakeChan(typ Type, buffer int) Value { if typ.ChanDir() != BothDir { panic("reflect.MakeChan: unidirectional channel type") } - t := typ.(*rtype) + t := typ.common() ch := makechan(t, buffer) return Value{t, ch, flag(Chan)} } @@ -3138,7 +3135,7 @@ func MakeMapWithSize(typ Type, n int) Value { if typ.Kind() != Map { panic("reflect.MakeMapWithSize of non-map type") } - t := typ.(*rtype) + t := typ.common() m := makemap(t, n) return Value{t, m, flag(Map)} } @@ -3178,9 +3175,9 @@ func Zero(typ Type) Value { if typ == nil { panic("reflect: Zero(nil)") } - t := typ.(*rtype) + t := &typ.(*rtype).t fl := flag(t.Kind()) - if ifaceIndir(t) { + if t.IfaceIndir() { var p unsafe.Pointer if t.Size() <= maxZero { p = unsafe.Pointer(&zeroVal[0]) @@ -3204,8 +3201,8 @@ func New(typ Type) Value { if typ == nil { panic("reflect: New(nil)") } - t := typ.(*rtype) - pt := t.ptrTo() + t := &typ.(*rtype).t + pt := ptrTo(t) if ifaceIndir(pt) { // This is a pointer to a not-in-heap type. panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)") @@ -3228,7 +3225,7 @@ func NewAt(typ Type, p unsafe.Pointer) Value { // For a conversion to an interface type, target, if not nil, // is a suggested scratch space to use. // target must be initialized memory (or nil). -func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value { +func (v Value) assignTo(context string, dst *abi.Type, target unsafe.Pointer) Value { if v.flag&flagMethod != 0 { v = makeMethodValue(context, v) } @@ -3261,7 +3258,7 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value } // Failed. - panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String()) + panic(context + ": value of type " + stringFor(v.typ) + " is not assignable to type " + stringFor(dst)) } // Convert returns the value v converted to type t. @@ -3273,7 +3270,7 @@ func (v Value) Convert(t Type) Value { } op := convertOp(t.common(), v.typ) if op == nil { - panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String()) + panic("reflect.Value.Convert: value of type " + stringFor(v.typ) + " cannot be converted to type " + t.String()) } return op(v, t) } @@ -3415,10 +3412,10 @@ func (v Value) Equal(u Value) bool { // convertOp returns the function to convert a value of type src // to a value of type dst. If the conversion is illegal, convertOp returns nil. 
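// Zero and New above now operate on &typ.(*rtype).t directly. Zero's
// small-type path points the result at a shared zero buffer, which is safe
// because the returned Value is never addressable:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	z := reflect.Zero(reflect.TypeOf([4]int{}))
	fmt.Println(z.Interface()) // [0 0 0 0] (Interface copies, so sharing is invisible)
	fmt.Println(z.CanSet())    // false

	p := reflect.New(reflect.TypeOf(0)) // *int, backed by unsafe_New
	p.Elem().SetInt(3)
	fmt.Println(p.Elem().Int()) // 3
}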
-func convertOp(dst, src *rtype) func(Value, Type) Value { - switch src.Kind() { +func convertOp(dst, src *abi.Type) func(Value, Type) Value { + switch Kind(src.Kind()) { case Int, Int8, Int16, Int32, Int64: - switch dst.Kind() { + switch Kind(dst.Kind()) { case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: return cvtInt case Float32, Float64: @@ -3428,7 +3425,7 @@ func convertOp(dst, src *rtype) func(Value, Type) Value { } case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: - switch dst.Kind() { + switch Kind(dst.Kind()) { case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: return cvtUint case Float32, Float64: @@ -3438,7 +3435,7 @@ func convertOp(dst, src *rtype) func(Value, Type) Value { } case Float32, Float64: - switch dst.Kind() { + switch Kind(dst.Kind()) { case Int, Int8, Int16, Int32, Int64: return cvtFloatInt case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr: @@ -3448,14 +3445,14 @@ func convertOp(dst, src *rtype) func(Value, Type) Value { } case Complex64, Complex128: - switch dst.Kind() { + switch Kind(dst.Kind()) { case Complex64, Complex128: return cvtComplex } case String: - if dst.Kind() == Slice && dst.Elem().PkgPath() == "" { - switch dst.Elem().Kind() { + if dst.Kind() == abi.Slice && pkgPathFor(dst.Elem()) == "" { + switch Kind(dst.Elem().Kind()) { case Uint8: return cvtStringBytes case Int32: @@ -3464,8 +3461,8 @@ func convertOp(dst, src *rtype) func(Value, Type) Value { } case Slice: - if dst.Kind() == String && src.Elem().PkgPath() == "" { - switch src.Elem().Kind() { + if dst.Kind() == abi.String && pkgPathFor(src.Elem()) == "" { + switch Kind(src.Elem().Kind()) { case Uint8: return cvtBytesString case Int32: @@ -3474,17 +3471,17 @@ func convertOp(dst, src *rtype) func(Value, Type) Value { } // "x is a slice, T is a pointer-to-array type, // and the slice and array types have identical element types." - if dst.Kind() == Pointer && dst.Elem().Kind() == Array && src.Elem() == dst.Elem().Elem() { + if dst.Kind() == abi.Pointer && dst.Elem().Kind() == abi.Array && src.Elem() == dst.Elem().Elem() { return cvtSliceArrayPtr } // "x is a slice, T is an array type, // and the slice and array types have identical element types." - if dst.Kind() == Array && src.Elem() == dst.Elem() { + if dst.Kind() == abi.Array && src.Elem() == dst.Elem() { return cvtSliceArray } case Chan: - if dst.Kind() == Chan && specialChannelAssignability(dst, src) { + if dst.Kind() == abi.Chan && specialChannelAssignability(dst, src) { return cvtDirect } } @@ -3495,14 +3492,14 @@ func convertOp(dst, src *rtype) func(Value, Type) Value { } // dst and src are non-defined pointer types with same underlying base type. 
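// Each public Convert below resolves through one of the cvt* cases selected
// by the kind switch above:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	b := reflect.ValueOf("go").Convert(reflect.TypeOf([]byte(nil)))
	fmt.Println(b.Bytes()) // [103 111]  (String -> Slice: cvtStringBytes)

	a := reflect.ValueOf([]int{1, 2, 3}).Convert(reflect.TypeOf([3]int{}))
	fmt.Println(a.Interface()) // [1 2 3]  (Slice -> Array: cvtSliceArray)

	f := reflect.ValueOf(3).Convert(reflect.TypeOf(0.0))
	fmt.Println(f.Float()) // 3  (Int -> Float: cvtIntFloat)
}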
- if dst.Kind() == Pointer && dst.Name() == "" && - src.Kind() == Pointer && src.Name() == "" && - haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common(), false) { + if dst.Kind() == abi.Pointer && nameFor(dst) == "" && + src.Kind() == abi.Pointer && nameFor(src) == "" && + haveIdenticalUnderlyingType(elem(dst), elem(src), false) { return cvtDirect } if implements(dst, src) { - if src.Kind() == Interface { + if src.Kind() == abi.Interface { return cvtI2I } return cvtT2I @@ -3723,7 +3720,7 @@ func cvtT2I(v Value, typ Type) Value { if typ.NumMethod() == 0 { *(*any)(target) = x } else { - ifaceE2I(typ.(*rtype), x, target) + ifaceE2I(typ.common(), x, target) } return Value{typ.common(), target, v.flag.ro() | flagIndir | flag(Interface)} } @@ -3757,29 +3754,29 @@ func chanrecv(ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, receive //go:noescape func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool -func makechan(typ *rtype, size int) (ch unsafe.Pointer) -func makemap(t *rtype, cap int) (m unsafe.Pointer) +func makechan(typ *abi.Type, size int) (ch unsafe.Pointer) +func makemap(t *abi.Type, cap int) (m unsafe.Pointer) //go:noescape -func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) +func mapaccess(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) //go:noescape -func mapaccess_faststr(t *rtype, m unsafe.Pointer, key string) (val unsafe.Pointer) +func mapaccess_faststr(t *abi.Type, m unsafe.Pointer, key string) (val unsafe.Pointer) //go:noescape -func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer) +func mapassign(t *abi.Type, m unsafe.Pointer, key, val unsafe.Pointer) //go:noescape -func mapassign_faststr(t *rtype, m unsafe.Pointer, key string, val unsafe.Pointer) +func mapassign_faststr(t *abi.Type, m unsafe.Pointer, key string, val unsafe.Pointer) //go:noescape -func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer) +func mapdelete(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer) //go:noescape -func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string) +func mapdelete_faststr(t *abi.Type, m unsafe.Pointer, key string) //go:noescape -func mapiterinit(t *rtype, m unsafe.Pointer, it *hiter) +func mapiterinit(t *abi.Type, m unsafe.Pointer, it *hiter) //go:noescape func mapiterkey(it *hiter) (key unsafe.Pointer) @@ -3793,7 +3790,7 @@ func mapiternext(it *hiter) //go:noescape func maplen(m unsafe.Pointer) int -func mapclear(t *rtype, m unsafe.Pointer) +func mapclear(t *abi.Type, m unsafe.Pointer) // call calls fn with "stackArgsSize" bytes of stack arguments laid out // at stackArgs and register arguments laid out in regArgs. frameSize is @@ -3821,9 +3818,9 @@ func mapclear(t *rtype, m unsafe.Pointer) // //go:noescape //go:linkname call runtime.reflectcall -func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs) +func call(stackArgsType *abi.Type, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs) -func ifaceE2I(t *rtype, src any, dst unsafe.Pointer) +func ifaceE2I(t *abi.Type, src any, dst unsafe.Pointer) // memmove copies size bytes to dst from src. No write barriers are used. // @@ -3833,38 +3830,38 @@ func memmove(dst, src unsafe.Pointer, size uintptr) // typedmemmove copies a value of type t to dst from src. 
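// The "non-defined pointer types" rule at the top of this hunk is what lets
// Convert map between unnamed pointer types whose base types share an
// underlying type (cvtDirect):

package main

import (
	"fmt"
	"reflect"
)

type mine int

func main() {
	x := new(int)
	// *int and *mine are both unnamed pointer types, and mine's underlying
	// type is int, so convertOp selects cvtDirect for this pair.
	p := reflect.ValueOf(x).Convert(reflect.TypeOf((*mine)(nil)))
	fmt.Println(p.Type()) // *main.mine
}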
// //go:noescape -func typedmemmove(t *rtype, dst, src unsafe.Pointer) +func typedmemmove(t *abi.Type, dst, src unsafe.Pointer) // typedmemclr zeros the value at ptr of type t. // //go:noescape -func typedmemclr(t *rtype, ptr unsafe.Pointer) +func typedmemclr(t *abi.Type, ptr unsafe.Pointer) // typedmemclrpartial is like typedmemclr but assumes that // dst points off bytes into the value and only clears size bytes. // //go:noescape -func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr) +func typedmemclrpartial(t *abi.Type, ptr unsafe.Pointer, off, size uintptr) // typedslicecopy copies a slice of elemType values from src to dst, // returning the number of elements copied. // //go:noescape -func typedslicecopy(elemType *rtype, dst, src unsafeheader.Slice) int +func typedslicecopy(t *abi.Type, dst, src unsafeheader.Slice) int // typedarrayclear zeroes the value at ptr of an array of elemType, // only clears len elem. // //go:noescape -func typedarrayclear(elemType *rtype, ptr unsafe.Pointer, len int) +func typedarrayclear(elemType *abi.Type, ptr unsafe.Pointer, len int) //go:noescape -func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr +func typehash(t *abi.Type, p unsafe.Pointer, h uintptr) uintptr func verifyNotInHeapPtr(p uintptr) bool //go:noescape -func growslice(t *rtype, old unsafeheader.Slice, num int) unsafeheader.Slice +func growslice(t *abi.Type, old unsafeheader.Slice, num int) unsafeheader.Slice // Dummy annotation marking that the value x escapes, // for use in cases where the reflect code is so clever that