diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index a363b839847..f2728d972f7 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -163,6 +163,7 @@ func ParseFlags() { if buildcfg.Experiment.Unified { Debug.Unified = 1 } + Debug.SyncFrames = -1 // disable sync markers by default Debug.Checkptr = -1 // so we can tell whether it is set explicitly diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 70f1a2f847f..a5a2d56c466 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -75,11 +75,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { types.LocalPkg = types.NewPkg(base.Ctxt.Pkgpath, "") - // We won't know localpkg's height until after import - // processing. In the mean time, set to MaxPkgHeight to ensure - // height comparisons at least work until then. - types.LocalPkg.Height = types.MaxPkgHeight - // pseudo-package, for scoping types.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin? types.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go index 576036bdce3..440f557a80d 100644 --- a/src/cmd/compile/internal/importer/iimport.go +++ b/src/cmd/compile/internal/importer/iimport.go @@ -139,22 +139,19 @@ func ImportData(imports map[string]*types2.Package, data, path string) (pkg *typ pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) pkgName := p.stringAt(r.uint64()) - pkgHeight := int(r.uint64()) + _ = int(r.uint64()) // was package height, but not necessary anymore. if pkgPath == "" { pkgPath = path } pkg := imports[pkgPath] if pkg == nil { - pkg = types2.NewPackageHeight(pkgPath, pkgName, pkgHeight) + pkg = types2.NewPackage(pkgPath, pkgName) imports[pkgPath] = pkg } else { if pkg.Name() != pkgName { errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) } - if pkg.Height() != pkgHeight { - errorf("conflicting heights %v and %v for package %q", pkg.Height(), pkgHeight, path) - } } p.pkgCache[pkgPathOff] = pkg diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go index e5547b6d444..d00b765859c 100644 --- a/src/cmd/compile/internal/importer/ureader.go +++ b/src/cmd/compile/internal/importer/ureader.go @@ -39,7 +39,7 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + r.Bool() // TODO(mdempsky): Remove; was "has init" for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, @@ -162,9 +162,7 @@ func (r *reader) doPkg() *types2.Package { } name := r.String() - height := r.Len() - - pkg := types2.NewPackageHeight(path, name, height) + pkg := types2.NewPackage(path, name) r.p.imports[path] = pkg // TODO(mdempsky): The list of imported packages is important for diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 9ef016ab73f..77848577c6b 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -430,6 +430,36 @@ func (v *hairyVisitor) doNode(n ir.Node) bool { case ir.OMETHEXPR: v.budget++ // Hack for toolstash -cmp. 
+ + case ir.OAS2: + n := n.(*ir.AssignListStmt) + + // Unified IR unconditionally rewrites: + // + // a, b = f() + // + // into: + // + // DCL tmp1 + // DCL tmp2 + // tmp1, tmp2 = f() + // a, b = tmp1, tmp2 + // + // so that it can insert implicit conversions as necessary. To + // minimize impact to the existing inlining heuristics (in + // particular, to avoid breaking the existing inlinability regress + // tests), we need to compensate for this here. + if base.Debug.Unified != 0 { + if init := n.Rhs[0].Init(); len(init) == 1 { + if _, ok := init[0].(*ir.AssignListStmt); ok { + // 4 for each value, because each temporary variable now + // appears 3 times (DCL, LHS, RHS), plus an extra DCL node. + // + // 1 for the extra "tmp1, tmp2 = f()" assignment statement. + v.budget += 4*int32(len(n.Lhs)) + 1 + } + } + } } v.budget-- @@ -655,9 +685,8 @@ var inlgen int var SSADumpInline = func(*ir.Func) {} // NewInline allows the inliner implementation to be overridden. -// If it returns nil, the legacy inliner will handle this call -// instead. -var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { return nil } +// If it returns nil, the function will not be inlined. +var NewInline = oldInline // If n is a OCALLFUNC node, and fn is an ONAME node for a // function with an inlinable body, return an OINLCALL node that can replace n. @@ -777,7 +806,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b res := NewInline(n, fn, inlIndex) if res == nil { - res = oldInline(n, fn, inlIndex) + return n } // transitive inlining diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 8ac7e7f4f77..0058a98824e 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -119,8 +119,9 @@ func (n *BasicLit) SetVal(val constant.Value) { n.val = val } // or Op(X, Y) for builtin functions that do not become calls. type BinaryExpr struct { miniExpr - X Node - Y Node + X Node + Y Node + RType Node `mknode:"-"` // see reflectdata/helpers.go } func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr { @@ -148,6 +149,7 @@ type CallExpr struct { origNode X Node Args Nodes + RType Node `mknode:"-"` // see reflectdata/helpers.go KeepAlive []*Name // vars to be kept alive until call returns IsDDD bool NoInline bool @@ -192,6 +194,7 @@ type CompLitExpr struct { miniExpr origNode List Nodes // initialized values + RType Node `mknode:"-"` // *runtime._type for OMAPLIT map types Prealloc *Name // For OSLICELIT, Len is the backing array length. // For OMAPLIT, Len is the number of entries that we've removed from List and @@ -246,6 +249,27 @@ func (n *ConstExpr) Val() constant.Value { return n.val } type ConvExpr struct { miniExpr X Node + + // For implementing OCONVIFACE expressions. + // + // TypeWord is an expression yielding a *runtime._type or + // *runtime.itab value to go in the type word of the iface/eface + // result. See reflectdata.ConvIfaceTypeWord for further details. + // + // SrcRType is an expression yielding a *runtime._type value for X, + // if it's not pointer-shaped and needs to be heap allocated. + TypeWord Node `mknode:"-"` + SrcRType Node `mknode:"-"` + + // For -d=checkptr instrumentation of conversions from + // unsafe.Pointer to *Elem or *[Len]Elem. + // + // TODO(mdempsky): We only ever need one of these, but currently we + // don't decide which one until walk. 
Longer term, it probably makes + // sense to have a dedicated IR op for `(*[Len]Elem)(ptr)[:n:m]` + // expressions. + ElemRType Node `mknode:"-"` + ElemElemRType Node `mknode:"-"` } func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { @@ -275,6 +299,7 @@ type IndexExpr struct { miniExpr X Node Index Node + RType Node `mknode:"-"` // see reflectdata/helpers.go Assigned bool } @@ -385,8 +410,9 @@ func (n *LogicalExpr) SetOp(op Op) { // but *not* OMAKE (that's a pre-typechecking CallExpr). type MakeExpr struct { miniExpr - Len Node - Cap Node + RType Node `mknode:"-"` // see reflectdata/helpers.go + Len Node + Cap Node } func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr { @@ -623,7 +649,7 @@ type TypeAssertExpr struct { // Runtime type information provided by walkDotType for // assertions from non-empty interface to concrete type. - ITab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type + ITab Node `mknode:"-"` // *runtime.itab for Type implementing X's type } func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr { @@ -650,6 +676,11 @@ type DynamicTypeAssertExpr struct { miniExpr X Node + // SrcRType is an expression that yields a *runtime._type value + // representing X's type. It's used in failed assertion panic + // messages. + SrcRType Node + // RType is an expression that yields a *runtime._type value // representing the asserted type. // diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index c46debce36b..cae773b7227 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -170,6 +170,17 @@ type CaseClause struct { miniStmt Var *Name // declared variable for this case in type switch List Nodes // list of expressions for switch, early select + + // RTypes is a list of RType expressions, which are copied to the + // corresponding OEQ nodes that are emitted when switch statements + // are desugared. RTypes[i] must be non-nil if the emitted + // comparison for List[i] will be a mixed interface/concrete + // comparison; see reflectdata.CompareRType for details. + // + // Because mixed interface/concrete switch cases are rare, we allow + // len(RTypes) < len(List). Missing entries are implicitly nil. + RTypes Nodes + Body Nodes } @@ -333,11 +344,20 @@ type RangeStmt struct { Label *types.Sym Def bool X Node + RType Node `mknode:"-"` // see reflectdata/helpers.go Key Node Value Node Body Nodes HasBreak bool Prealloc *Name + + // When desugaring the RangeStmt during walk, the assignments to Key + // and Value may require OCONVIFACE operations. If so, these fields + // will be copied to their respective ConvExpr fields. + KeyTypeWord Node `mknode:"-"` + KeySrcRType Node `mknode:"-"` + ValueTypeWord Node `mknode:"-"` + ValueSrcRType Node `mknode:"-"` } func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt { diff --git a/src/cmd/compile/internal/noder/codes.go b/src/cmd/compile/internal/noder/codes.go index 8f54a07ca46..1a60ea39bb8 100644 --- a/src/cmd/compile/internal/noder/codes.go +++ b/src/cmd/compile/internal/noder/codes.go @@ -1,5 +1,3 @@ -// UNREVIEWED - // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,6 +6,7 @@ package noder import "internal/pkgbits" +// A codeStmt distinguishes among statement encodings. 
type codeStmt int func (c codeStmt) Marker() pkgbits.SyncMarker { return pkgbits.SyncStmt1 } @@ -31,6 +30,7 @@ const ( stmtSelect ) +// A codeExpr distinguishes among expression encodings. type codeExpr int func (c codeExpr) Marker() pkgbits.SyncMarker { return pkgbits.SyncExpr } @@ -38,12 +38,9 @@ func (c codeExpr) Value() int { return int(c) } // TODO(mdempsky): Split expr into addr, for lvalues. const ( - exprNone codeExpr = iota - exprConst - exprType // type expression - exprLocal // local variable - exprName // global variable or function - exprBlank + exprConst codeExpr = iota + exprLocal // local variable + exprGlobal // global variable or function exprCompLit exprFuncLit exprSelector @@ -54,8 +51,23 @@ const ( exprBinaryOp exprCall exprConvert + exprNew + exprMake + exprNil ) +type codeAssign int + +func (c codeAssign) Marker() pkgbits.SyncMarker { return pkgbits.SyncAssign } +func (c codeAssign) Value() int { return int(c) } + +const ( + assignBlank codeAssign = iota + assignDef + assignExpr +) + +// A codeDecl distinguishes among declaration encodings. type codeDecl int func (c codeDecl) Marker() pkgbits.SyncMarker { return pkgbits.SyncDecl } diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go index a1160d42c44..54b07c39f49 100644 --- a/src/cmd/compile/internal/noder/expr.go +++ b/src/cmd/compile/internal/noder/expr.go @@ -6,7 +6,6 @@ package noder import ( "fmt" - "go/constant" "cmd/compile/internal/base" "cmd/compile/internal/ir" @@ -53,31 +52,9 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node { base.Assert(g.exprStmtOK) - // The gc backend expects all expressions to have a concrete type, and - // types2 mostly satisfies this expectation already. But there are a few - // cases where the Go spec doesn't require converting to concrete type, - // and so types2 leaves them untyped. So we need to fix those up here. - typ := tv.Type - if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 { - switch basic.Kind() { - case types2.UntypedNil: - // ok; can appear in type switch case clauses - // TODO(mdempsky): Handle as part of type switches instead? - case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex: - // Untyped rhs of non-constant shift, e.g. x << 1.0. - // If we have a constant value, it must be an int >= 0. - if tv.Value != nil { - s := constant.ToInt(tv.Value) - assert(s.Kind() == constant.Int && constant.Sign(s) >= 0) - } - typ = types2.Typ[types2.Uint] - case types2.UntypedBool: - typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition - case types2.UntypedString: - typ = types2.Typ[types2.String] // argument to "append" or "copy" calls - default: - base.FatalfAt(g.pos(expr), "unexpected untyped type: %v", basic) - } + typ := idealType(tv) + if typ == nil { + base.FatalfAt(g.pos(expr), "unexpected untyped type: %v", tv.Type) } // Constant expression. 
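For illustration only (not part of the patch; the package and identifier names below are made up), a minimal Go sketch of the untyped-expression cases that the idealType helper in helpers.go (next file) resolves to concrete types for the gc backend:

package example

func untypedExamples(p *int, b []byte) {
	var x uint = 1
	_ = x << 1.0 // shift count 1.0 stays an untyped float in types2; idealType picks uint

	if p == nil { // the comparison is an untyped bool "if" condition; idealType picks bool
		_ = append(b, "hi"...) // "hi" is an untyped string argument to append; idealType picks string
	}
}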
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go index 33acd6051ac..40f80ab528d 100644 --- a/src/cmd/compile/internal/noder/helpers.go +++ b/src/cmd/compile/internal/noder/helpers.go @@ -11,6 +11,7 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" + "cmd/compile/internal/types2" "cmd/internal/src" ) @@ -39,10 +40,6 @@ func typed(typ *types.Type, n ir.Node) ir.Node { // Values -func Const(pos src.XPos, typ *types.Type, val constant.Value) ir.Node { - return typed(typ, ir.NewBasicLit(pos, val)) -} - func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node { orig := ir.NewRawOrigExpr(pos, op, raw) return ir.NewConstExpr(val, typed(typ, orig)) @@ -224,3 +221,33 @@ func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt { } return ir.NewAssignOpStmt(pos, op, x, bl) } + +func idealType(tv types2.TypeAndValue) types2.Type { + // The gc backend expects all expressions to have a concrete type, and + // types2 mostly satisfies this expectation already. But there are a few + // cases where the Go spec doesn't require converting to concrete type, + // and so types2 leaves them untyped. So we need to fix those up here. + typ := tv.Type + if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 { + switch basic.Kind() { + case types2.UntypedNil: + // ok; can appear in type switch case clauses + // TODO(mdempsky): Handle as part of type switches instead? + case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex: + // Untyped rhs of non-constant shift, e.g. x << 1.0. + // If we have a constant value, it must be an int >= 0. + if tv.Value != nil { + s := constant.ToInt(tv.Value) + assert(s.Kind() == constant.Int && constant.Sign(s) >= 0) + } + typ = types2.Typ[types2.Uint] + case types2.UntypedBool: + typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition + case types2.UntypedString: + typ = types2.Typ[types2.String] // argument to "append" or "copy" calls + default: + return nil + } + } + return typ +} diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go index 2cef9f75e80..49b8fd142a0 100644 --- a/src/cmd/compile/internal/noder/import.go +++ b/src/cmd/compile/internal/noder/import.go @@ -241,7 +241,7 @@ func readImportFile(path string, target *ir.Package, env *types2.Context, packag pr := pkgbits.NewPkgDecoder(pkg1.Path, data) // Read package descriptors for both types2 and compiler backend. - readPackage(newPkgReader(pr), pkg1) + readPackage(newPkgReader(pr), pkg1, false) pkg2 = importer.ReadPackage(env, packages, pr) case 'i': diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go index e45a204867d..ad937eac62e 100644 --- a/src/cmd/compile/internal/noder/irgen.go +++ b/src/cmd/compile/internal/noder/irgen.go @@ -219,7 +219,6 @@ type typeDelayInfo struct { func (g *irgen) generate(noders []*noder) { types.LocalPkg.Name = g.self.Name() - types.LocalPkg.Height = g.self.Height() typecheck.TypecheckAllowed = true // Prevent size calculations until we set the underlying type diff --git a/src/cmd/compile/internal/noder/linker.go b/src/cmd/compile/internal/noder/linker.go index a58b9b930cf..0f39fdec051 100644 --- a/src/cmd/compile/internal/noder/linker.go +++ b/src/cmd/compile/internal/noder/linker.go @@ -1,5 +1,3 @@ -// UNREVIEWED - // Copyright 2021 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -34,13 +32,20 @@ import ( // low-level linking details can be moved there, but the logic for // handling extension data needs to stay in the compiler. +// A linker combines a package's stub export data with any referenced +// elements from imported packages into a single, self-contained +// export data file. type linker struct { pw pkgbits.PkgEncoder - pkgs map[string]pkgbits.Index - decls map[*types.Sym]pkgbits.Index + pkgs map[string]pkgbits.Index + decls map[*types.Sym]pkgbits.Index + bodies map[*types.Sym]pkgbits.Index } +// relocAll ensures that all elements specified by pr and relocs are +// copied into the output export data file, and returns the +// corresponding indices in the output. func (l *linker) relocAll(pr *pkgReader, relocs []pkgbits.RelocEnt) []pkgbits.RelocEnt { res := make([]pkgbits.RelocEnt, len(relocs)) for i, rent := range relocs { @@ -50,6 +55,8 @@ func (l *linker) relocAll(pr *pkgReader, relocs []pkgbits.RelocEnt) []pkgbits.Re return res } +// relocIdx ensures a single element is copied into the output export +// data file, and returns the corresponding index in the output. func (l *linker) relocIdx(pr *pkgReader, k pkgbits.RelocKind, idx pkgbits.Index) pkgbits.Index { assert(pr != nil) @@ -85,10 +92,19 @@ func (l *linker) relocIdx(pr *pkgReader, k pkgbits.RelocKind, idx pkgbits.Index) return newidx } +// relocString copies the specified string from pr into the output +// export data file, deduplicating it against other strings. func (l *linker) relocString(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { return l.pw.StringIdx(pr.StringIdx(idx)) } +// relocPkg copies the specified package from pr into the output +// export data file, rewriting its import path to match how it was +// imported. +// +// TODO(mdempsky): Since CL 391014, we already have the compilation +// unit's import path, so there should be no need to rewrite packages +// anymore. func (l *linker) relocPkg(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { path := pr.PeekPkgPath(idx) @@ -114,6 +130,9 @@ func (l *linker) relocPkg(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { return w.Flush() } +// relocObj copies the specified object from pr into the output export +// data file, rewriting its compiler-private extension data (e.g., +// adding inlining cost and escape analysis results for functions). func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { path, name, tag := pr.PeekObj(idx) sym := types.NewPkg(path, "").Lookup(name) @@ -152,21 +171,12 @@ func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { l.relocCommon(pr, &wname, pkgbits.RelocName, idx) l.relocCommon(pr, &wdict, pkgbits.RelocObjDict, idx) - var obj *ir.Name - if sym.Pkg == types.LocalPkg { - var ok bool - obj, ok = sym.Def.(*ir.Name) + // Generic types and functions won't have definitions, and imported + // objects may not either. + obj, _ := sym.Def.(*ir.Name) + local := sym.Pkg == types.LocalPkg - // Generic types and functions and declared constraint types won't - // have definitions. - // For now, just generically copy their extension data. - // TODO(mdempsky): Restore assertion. 
- if !ok && false { - base.Fatalf("missing definition for %v", sym) - } - } - - if obj != nil { + if local && obj != nil { wext.Sync(pkgbits.SyncObject1) switch tag { case pkgbits.ObjFunc: @@ -181,9 +191,66 @@ func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { l.relocCommon(pr, &wext, pkgbits.RelocObjExt, idx) } + // Check if we need to export the inline bodies for functions and + // methods. + if obj != nil { + if obj.Op() == ir.ONAME && obj.Class == ir.PFUNC { + l.exportBody(obj, local) + } + + if obj.Op() == ir.OTYPE { + if typ := obj.Type(); !typ.IsInterface() { + for _, method := range typ.Methods().Slice() { + l.exportBody(method.Nname.(*ir.Name), local) + } + } + } + } + return w.Idx } +// exportBody exports the given function or method's body, if +// appropriate. local indicates whether it's a local function or +// method available on a locally declared type. (Due to cross-package +// type aliases, a method may be imported, but still available on a +// locally declared type.) +func (l *linker) exportBody(obj *ir.Name, local bool) { + assert(obj.Op() == ir.ONAME && obj.Class == ir.PFUNC) + + fn := obj.Func + if fn.Inl == nil { + return // not inlinable anyway + } + + // As a simple heuristic, if the function was declared in this + // package or we inlined it somewhere in this package, then we'll + // (re)export the function body. This isn't perfect, but seems + // reasonable in practice. In particular, it has the nice property + // that in the worst case, adding a blank import ensures the + // function body is available for inlining. + // + // TODO(mdempsky): Reimplement the reachable method crawling logic + // from typecheck/crawler.go. + exportBody := local || fn.Inl.Body != nil + if !exportBody { + return + } + + sym := obj.Sym() + if _, ok := l.bodies[sym]; ok { + // Due to type aliases, we might visit methods multiple times. + base.AssertfAt(obj.Type().Recv() != nil, obj.Pos(), "expected method: %v", obj) + return + } + + pri, ok := bodyReaderFor(fn) + assert(ok) + l.bodies[sym] = l.relocIdx(pri.pr, pkgbits.RelocBody, pri.idx) +} + +// relocCommon copies the specified element from pr into w, +// recursively relocating any referenced elements as well. func (l *linker) relocCommon(pr *pkgReader, w *pkgbits.Encoder, k pkgbits.RelocKind, idx pkgbits.Index) { r := pr.NewDecoderRaw(k, idx) w.Relocs = l.relocAll(pr, r.Relocs) @@ -220,10 +287,6 @@ func (l *linker) relocFuncExt(w *pkgbits.Encoder, name *ir.Name) { if inl := name.Func.Inl; w.Bool(inl != nil) { w.Len(int(inl.Cost)) w.Bool(inl.CanDelayResults) - - pri, ok := bodyReader[name.Func] - assert(ok) - w.Reloc(pkgbits.RelocBody, l.relocIdx(pri.pr, pkgbits.RelocBody, pri.idx)) } w.Sync(pkgbits.SyncEOF) diff --git a/src/cmd/compile/internal/noder/quirks.go b/src/cmd/compile/internal/noder/quirks.go index c4cb9b9a2c2..a22577f9656 100644 --- a/src/cmd/compile/internal/noder/quirks.go +++ b/src/cmd/compile/internal/noder/quirks.go @@ -1,5 +1,3 @@ -// UNREVIEWED - // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,12 +10,12 @@ import ( "cmd/compile/internal/syntax" ) -// This file defines helper functions useful for satisfying toolstash -// -cmp when compared against the legacy frontend behavior, but can be -// removed after that's no longer a concern. - // typeExprEndPos returns the position that noder would leave base.Pos // after parsing the given type expression. 
+// +// Deprecated: This function exists to emulate position semantics from +// Go 1.17, necessary for compatibility with the backend DWARF +// generation logic that assigns variables to their appropriate scope. func typeExprEndPos(expr0 syntax.Expr) syntax.Pos { for { switch expr := expr0.(type) { diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 296cdd7d548..d02d05bc5d2 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -1,5 +1,3 @@ -// UNREVIEWED - // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -19,6 +17,7 @@ import ( "cmd/compile/internal/dwarfgen" "cmd/compile/internal/inline" "cmd/compile/internal/ir" + "cmd/compile/internal/objw" "cmd/compile/internal/reflectdata" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -26,15 +25,25 @@ import ( "cmd/internal/src" ) +// This file implements the cmd/compile backend's reader for the Unified +// IR export data. + +// A pkgReader reads Unified IR export data. type pkgReader struct { pkgbits.PkgDecoder + // Indices for encoded things; lazily populated as needed. + // + // Note: Objects (i.e., ir.Names) are lazily instantiated by + // populating their types.Sym.Def; see objReader below. + posBases []*src.PosBase pkgs []*types.Pkg typs []*types.Type - // offset for rewriting the given index into the output, - // but bitwise inverted so we can detect if we're missing the entry or not. + // offset for rewriting the given (absolute!) index into the output, + // but bitwise inverted so we can detect if we're missing the entry + // or not. newindex []pkgbits.Index } @@ -50,15 +59,19 @@ func newPkgReader(pr pkgbits.PkgDecoder) *pkgReader { } } +// A pkgReaderIndex compactly identifies an index (and its +// corresponding dictionary) within a package's export data. type pkgReaderIndex struct { - pr *pkgReader - idx pkgbits.Index - dict *readerDict + pr *pkgReader + idx pkgbits.Index + dict *readerDict + shapedFn *ir.Func } func (pri pkgReaderIndex) asReader(k pkgbits.RelocKind, marker pkgbits.SyncMarker) *reader { r := pri.pr.newReader(k, pri.idx, marker) r.dict = pri.dict + r.shapedFn = pri.shapedFn return r } @@ -69,6 +82,7 @@ func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pk } } +// A reader provides APIs for reading an individual element. type reader struct { pkgbits.Decoder @@ -87,6 +101,12 @@ type reader struct { funarghack bool + // shapedFn is the shape-typed version of curfn, if any. + shapedFn *ir.Func + + // dictParam is the .dict param, if any. + dictParam *ir.Name + // scopeVars is a stack tracking the number of variables declared in // the current function at the moment each open scope was opened. scopeVars []int @@ -108,7 +128,13 @@ type reader struct { // Label to return to. retlabel *types.Sym - inlvars, retvars ir.Nodes + // inlvars is the list of variables that the inlinee's arguments are + // assigned to, one for each receiver and normal parameter, in order. + inlvars ir.Nodes + + // retvars is the list of variables that the inlinee's results are + // assigned to, one for each result parameter, in order.
+ retvars ir.Nodes } type readerDict struct { @@ -143,6 +169,8 @@ type readerDict struct { funcsObj []ir.Node itabs []itabInfo2 + + methodExprs []ir.Node } type itabInfo2 struct { @@ -162,6 +190,7 @@ func setValue(name *ir.Name, val constant.Value) { // @@@ Positions +// pos reads a position from the bitstream. func (r *reader) pos() src.XPos { return base.Ctxt.PosTable.XPos(r.pos0()) } @@ -178,10 +207,13 @@ func (r *reader) pos0() src.Pos { return src.MakePos(posBase, line, col) } +// posBase reads a position base from the bitstream. func (r *reader) posBase() *src.PosBase { return r.inlPosBase(r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))) } +// posBaseIdx returns the specified position base, reading it first if +// needed. func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) *src.PosBase { if b := pr.posBases[idx]; b != nil { return b @@ -222,6 +254,7 @@ func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) *src.PosBase { return b } +// TODO(mdempsky): Document this. func (r *reader) inlPosBase(oldBase *src.PosBase) *src.PosBase { if r.inlCall == nil { return oldBase @@ -236,36 +269,23 @@ func (r *reader) inlPosBase(oldBase *src.PosBase) *src.PosBase { return newBase } +// TODO(mdempsky): Document this. func (r *reader) updatePos(xpos src.XPos) src.XPos { pos := base.Ctxt.PosTable.Pos(xpos) pos.SetBase(r.inlPosBase(pos.Base())) return base.Ctxt.PosTable.XPos(pos) } -func (r *reader) origPos(xpos src.XPos) src.XPos { - if r.inlCall == nil { - return xpos - } - - pos := base.Ctxt.PosTable.Pos(xpos) - for old, new := range r.inlPosBases { - if pos.Base() == new { - pos.SetBase(old) - return base.Ctxt.PosTable.XPos(pos) - } - } - - base.FatalfAt(xpos, "pos base missing from inlPosBases") - panic("unreachable") -} - // @@@ Packages +// pkg reads a package reference from the bitstream. func (r *reader) pkg() *types.Pkg { r.Sync(pkgbits.SyncPkg) return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) } +// pkgIdx returns the specified package from the export data, reading +// it first if needed. func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Pkg { if pkg := pr.pkgs[idx]; pkg != nil { return pkg @@ -276,6 +296,7 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Pkg { return pkg } +// doPkg reads a package definition from the bitstream. func (r *reader) doPkg() *types.Pkg { path := r.String() switch path { @@ -288,7 +309,6 @@ func (r *reader) doPkg() *types.Pkg { } name := r.String() - height := r.Len() pkg := types.NewPkg(path, "") @@ -298,12 +318,6 @@ func (r *reader) doPkg() *types.Pkg { base.Assertf(pkg.Name == name, "package %q has name %q, but want %q", pkg.Path, pkg.Name, name) } - if pkg.Height == 0 { - pkg.Height = height - } else { - base.Assertf(pkg.Height == height, "package %q has height %v, but want %v", pkg.Path, pkg.Height, height) - } - return pkg } @@ -536,8 +550,12 @@ func (r *reader) param() (*types.Pkg, *types.Field) { // @@@ Objects +// objReader maps qualified identifiers (represented as *types.Sym) to +// a pkgReader and corresponding index that can be used for reading +// that object's definition. var objReader = map[*types.Sym]pkgReaderIndex{} +// obj reads an instantiated object reference from the bitstream. func (r *reader) obj() ir.Node { r.Sync(pkgbits.SyncObject) @@ -573,6 +591,8 @@ func (r *reader) obj() ir.Node { return r.p.objIdx(idx, implicits, explicits) } +// objIdx returns the specified object from the bitstream, +// instantiated with the given type arguments, if any. 
func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Type) ir.Node { rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) _, sym := rname.qualifiedIdent() @@ -605,6 +625,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ do := func(op ir.Op, hasTParams bool) *ir.Name { pos := r.pos() + setBasePos(pos) if hasTParams { r.typeParamNames() } @@ -712,6 +733,7 @@ func (r *reader) mangle(sym *types.Sym) *types.Sym { return sym.Pkg.Lookup(buf.String()) } +// objDictIdx reads and returns the specified object dictionary. func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type) *readerDict { r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) @@ -730,12 +752,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, ex // For stenciling, we can just skip over the type parameters. for range dict.targs[dict.implicits:] { // Skip past bounds without actually evaluating them. - r.Sync(pkgbits.SyncType) - if r.Bool() { - r.Len() - } else { - r.Reloc(pkgbits.RelocType) - } + r.typInfo() } dict.derived = make([]derivedInfo, r.Len()) @@ -771,6 +788,14 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, ex dict.itabs[i] = itabInfo2{typ: typ, lsym: lsym} } + dict.methodExprs = make([]ir.Node, r.Len()) + for i := range dict.methodExprs { + recv := pr.typIdx(typeInfo{idx: pkgbits.Index(r.Len()), derived: true}, &dict, true) + _, sym := r.selector() + + dict.methodExprs[i] = typecheck.Expr(ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, ir.TypeNode(recv), sym)) + } + return &dict } @@ -791,9 +816,7 @@ func (r *reader) method(rext *reader) *types.Field { _, recv := r.param() typ := r.signature(pkg, recv) - fnsym := sym - fnsym = ir.MethodSym(recv.Type, fnsym) - name := ir.NewNameAt(pos, fnsym) + name := ir.NewNameAt(pos, ir.MethodSym(recv.Type, sym)) setType(name, typ) name.Func = ir.NewFunc(r.pos()) @@ -882,6 +905,8 @@ func (r *reader) funcExt(name *ir.Name) { typecheck.Func(fn) if r.Bool() { + assert(name.Defn == nil) + fn.ABI = obj.ABI(r.Uint64()) // Escape analysis. @@ -896,7 +921,6 @@ func (r *reader) funcExt(name *ir.Name) { Cost: int32(r.Len()), CanDelayResults: r.Bool(), } - r.addBody(name.Func) } } else { r.addBody(name.Func) @@ -952,24 +976,71 @@ func (r *reader) pragmaFlag() ir.PragmaFlag { // @@@ Function bodies -// bodyReader tracks where the serialized IR for a function's body can -// be found. +// bodyReader tracks where the serialized IR for a local or imported, +// generic function's body can be found. var bodyReader = map[*ir.Func]pkgReaderIndex{} +// importBodyReader tracks where the serialized IR for an imported, +// static (i.e., non-generic) function body can be read. +var importBodyReader = map[*types.Sym]pkgReaderIndex{} + +// bodyReaderFor returns the pkgReaderIndex for reading fn's +// serialized IR, and whether one was found. +func bodyReaderFor(fn *ir.Func) (pri pkgReaderIndex, ok bool) { + if fn.Nname.Defn != nil { + pri, ok = bodyReader[fn] + base.AssertfAt(ok, base.Pos, "must have bodyReader for %v", fn) // must always be available + } else { + pri, ok = importBodyReader[fn.Sym()] + } + return +} + // todoBodies holds the list of function bodies that still need to be // constructed. var todoBodies []*ir.Func +// addBody reads a function body reference from the element bitstream, +// and associates it with fn. 
func (r *reader) addBody(fn *ir.Func) { - pri := pkgReaderIndex{r.p, r.Reloc(pkgbits.RelocBody), r.dict} - bodyReader[fn] = pri + // addBody should only be called for local functions or imported + // generic functions; see comment in funcExt. + assert(fn.Nname.Defn != nil) - if fn.Nname.Defn == nil { - // Don't read in function body for imported functions. - // See comment in funcExt. - return + idx := r.Reloc(pkgbits.RelocBody) + + var shapedFn *ir.Func + if r.hasTypeParams() && fn.OClosure == nil { + name := fn.Nname + sym := name.Sym() + + shapedSym := sym.Pkg.Lookup(sym.Name + "-shaped") + + // TODO(mdempsky): Once we actually start shaping functions, we'll + // need to deduplicate them. + shaped := ir.NewDeclNameAt(name.Pos(), ir.ONAME, shapedSym) + setType(shaped, shapeSig(fn, r.dict)) // TODO(mdempsky): Use shape types. + + shapedFn = ir.NewFunc(fn.Pos()) + shaped.Func = shapedFn + shapedFn.Nname = shaped + shapedFn.SetDupok(true) + + shaped.Class = 0 // so MarkFunc doesn't complain + ir.MarkFunc(shaped) + + shaped.Defn = shapedFn + + shapedFn.Pragma = fn.Pragma // TODO(mdempsky): How does stencil.go handle pragmas? + typecheck.Func(shapedFn) + + bodyReader[shapedFn] = pkgReaderIndex{r.p, idx, r.dict, nil} + todoBodies = append(todoBodies, shapedFn) } + pri := pkgReaderIndex{r.p, idx, r.dict, shapedFn} + bodyReader[fn] = pri + if r.curfn == nil { todoBodies = append(todoBodies, fn) return @@ -983,9 +1054,14 @@ func (pri pkgReaderIndex) funcBody(fn *ir.Func) { r.funcBody(fn) } +// funcBody reads a function body definition from the element +// bitstream, and populates fn with it. func (r *reader) funcBody(fn *ir.Func) { r.curfn = fn r.closureVars = fn.ClosureVars + if len(r.closureVars) != 0 && r.hasTypeParams() { + r.dictParam = r.closureVars[len(r.closureVars)-1] // dictParam is last; see reader.funcLit + } ir.WithFunc(fn, func() { r.funcargs(fn) @@ -994,6 +1070,11 @@ func (r *reader) funcBody(fn *ir.Func) { return } + if r.shapedFn != nil { + r.callShaped(fn.Pos()) + return + } + body := r.stmts() if body == nil { body = []ir.Node{typecheck.Stmt(ir.NewBlockStmt(src.NoXPos, nil))} @@ -1005,6 +1086,139 @@ func (r *reader) funcBody(fn *ir.Func) { r.marker.WriteTo(fn) } +// callShaped emits a tail call to r.shapedFn, passing along the +// arguments to the current function. +func (r *reader) callShaped(pos src.XPos) { + sig := r.curfn.Nname.Type() + + var args ir.Nodes + + // First argument is a pointer to the -dict global variable. + args.Append(r.dictPtr()) + + // Collect the arguments to the current function, so we can pass + // them along to the shaped function. (This is unfortunately quite + // hairy.) + for _, params := range &types.RecvsParams { + for _, param := range params(sig).FieldSlice() { + var arg ir.Node + if param.Nname != nil { + name := param.Nname.(*ir.Name) + if !ir.IsBlank(name) { + if r.inlCall != nil { + // During inlining, we want the respective inlvar where we + // assigned the callee's arguments. + arg = r.inlvars[len(args)-1] + } else { + // Otherwise, we can use the parameter itself directly. + base.AssertfAt(name.Curfn == r.curfn, name.Pos(), "%v has curfn %v, but want %v", name, name.Curfn, r.curfn) + arg = name + } + } + } + + // For anonymous and blank parameters, we don't have an *ir.Name + // to use as the argument. However, since we know the shaped + // function won't use the value either, we can just pass the + // zero value. (Also unfortunately, we don't have an easy + // zero-value IR node; so we use a default-initialized temporary + // variable.) 
+ if arg == nil { + tmp := typecheck.TempAt(pos, r.curfn, param.Type) + r.curfn.Body.Append( + typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp)), + typecheck.Stmt(ir.NewAssignStmt(pos, tmp, nil)), + ) + arg = tmp + } + + args.Append(arg) + } + } + + // Mark the function as a wrapper so it doesn't show up in stack + // traces. + r.curfn.SetWrapper(true) + + call := typecheck.Call(pos, r.shapedFn.Nname, args, sig.IsVariadic()).(*ir.CallExpr) + + var stmt ir.Node + if sig.NumResults() != 0 { + stmt = typecheck.Stmt(ir.NewReturnStmt(pos, []ir.Node{call})) + } else { + stmt = call + } + r.curfn.Body.Append(stmt) +} + +// dictPtr returns a pointer to the runtime dictionary variable needed +// for the current function to call its shaped variant. +func (r *reader) dictPtr() ir.Node { + var fn *ir.Func + if r.inlCall != nil { + // During inlining, r.curfn is named after the caller (not the + // callee), because it's relevant to closure naming, sigh. + fn = r.inlFunc + } else { + fn = r.curfn + } + + var baseSym *types.Sym + if recv := fn.Nname.Type().Recv(); recv != nil { + // All methods of a given instantiated receiver type share the + // same dictionary. + baseSym = deref(recv.Type).Sym() + } else { + baseSym = fn.Nname.Sym() + } + + sym := baseSym.Pkg.Lookup(baseSym.Name + "-dict") + + if sym.Def == nil { + dict := ir.NewNameAt(r.curfn.Pos(), sym) + dict.Class = ir.PEXTERN + + lsym := dict.Linksym() + ot := 0 + + for idx, info := range r.dict.derived { + if info.needed { + typ := r.p.typIdx(typeInfo{idx: pkgbits.Index(idx), derived: true}, r.dict, false) + rtype := reflectdata.TypeLinksym(typ) + ot = objw.SymPtr(lsym, ot, rtype, 0) + } else { + // TODO(mdempsky): Compact unused runtime dictionary space. + ot = objw.Uintptr(lsym, ot, 0) + } + } + + // TODO(mdempsky): Write out more dictionary information. + + objw.Global(lsym, int32(ot), obj.DUPOK|obj.RODATA) + + dict.SetType(r.dict.varType()) + dict.SetTypecheck(1) + + sym.Def = dict + } + + return typecheck.Expr(ir.NewAddrExpr(r.curfn.Pos(), sym.Def.(*ir.Name))) +} + +// numWords returns the number of words that dict's runtime dictionary +// variable requires. +func (dict *readerDict) numWords() int64 { + var num int + num += len(dict.derivedTypes) + // TODO(mdempsky): Add space for more dictionary information. + return int64(num) +} + +// varType returns the type of dict's runtime dictionary variable. +func (dict *readerDict) varType() *types.Type { + return types.NewArray(types.Types[types.TUINTPTR], dict.numWords()) +} + func (r *reader) funcargs(fn *ir.Func) { sig := fn.Nname.Type() @@ -1062,16 +1276,20 @@ func (r *reader) funcarg(param *types.Field, sym *types.Sym, ctxt ir.Class) { func (r *reader) addLocal(name *ir.Name, ctxt ir.Class) { assert(ctxt == ir.PAUTO || ctxt == ir.PPARAM || ctxt == ir.PPARAMOUT) - r.Sync(pkgbits.SyncAddLocal) - if pkgbits.EnableSync { - want := r.Int() - if have := len(r.locals); have != want { - base.FatalfAt(name.Pos(), "locals table has desynced") + if name.Sym().Name == dictParamName { + r.dictParam = name + } else { + r.Sync(pkgbits.SyncAddLocal) + if r.p.SyncMarkers() { + want := r.Int() + if have := len(r.locals); have != want { + base.FatalfAt(name.Pos(), "locals table has desynced") + } } + r.locals = append(r.locals, name) } name.SetUsed(true) - r.locals = append(r.locals, name) // TODO(mdempsky): Move earlier. 
if ir.IsBlank(name) { @@ -1229,11 +1447,8 @@ func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node { case stmtAssign: pos := r.pos() - - // TODO(mdempsky): After quirks mode is gone, swap these - // statements so we visit LHS before RHS again. - rhs := r.exprList() names, lhs := r.assignList() + rhs := r.multiExpr() if len(rhs) == 0 { for _, name := range names { @@ -1301,7 +1516,7 @@ func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node { case stmtReturn: pos := r.pos() - results := r.exprList() + results := r.multiExpr() return ir.NewReturnStmt(pos, results) case stmtSelect: @@ -1323,25 +1538,42 @@ func (r *reader) assignList() ([]*ir.Name, []ir.Node) { var names []*ir.Name for i := range lhs { - if r.Bool() { - pos := r.pos() - _, sym := r.localIdent() - typ := r.typ() - - name := ir.NewNameAt(pos, sym) - lhs[i] = name - names = append(names, name) - setType(name, typ) - r.addLocal(name, ir.PAUTO) - continue + expr, def := r.assign() + lhs[i] = expr + if def { + names = append(names, expr.(*ir.Name)) } - - lhs[i] = r.expr() } return names, lhs } +// assign returns an assignee expression. It also reports whether the +// returned expression is a newly declared variable. +func (r *reader) assign() (ir.Node, bool) { + switch tag := codeAssign(r.Code(pkgbits.SyncAssign)); tag { + default: + panic("unhandled assignee expression") + + case assignBlank: + return typecheck.AssignExpr(ir.BlankNode), false + + case assignDef: + pos := r.pos() + setBasePos(pos) + _, sym := r.localIdent() + typ := r.typ() + + name := ir.NewNameAt(pos, sym) + setType(name, typ) + r.addLocal(name, ir.PAUTO) + return name, true + + case assignExpr: + return r.expr(), false + } +} + func (r *reader) blockStmt() []ir.Node { r.Sync(pkgbits.SyncBlockStmt) r.openScope() @@ -1357,16 +1589,10 @@ func (r *reader) forStmt(label *types.Sym) ir.Node { if r.Bool() { pos := r.pos() + rang := ir.NewRangeStmt(pos, nil, nil, nil, nil) + rang.Label = label - // TODO(mdempsky): After quirks mode is gone, swap these - // statements so we read LHS before X again. - x := r.expr() names, lhs := r.assignList() - - body := r.blockStmt() - r.closeAnotherScope() - - rang := ir.NewRangeStmt(pos, nil, nil, x, body) if len(lhs) >= 1 { rang.Key = lhs[0] if len(lhs) >= 2 { @@ -1374,13 +1600,27 @@ func (r *reader) forStmt(label *types.Sym) ir.Node { } } rang.Def = r.initDefn(rang, names) - rang.Label = label + + rang.X = r.expr() + if rang.X.Type().IsMap() { + rang.RType = r.rtype(pos) + } + if rang.Key != nil && !ir.IsBlank(rang.Key) { + rang.KeyTypeWord, rang.KeySrcRType = r.convRTTI(pos) + } + if rang.Value != nil && !ir.IsBlank(rang.Value) { + rang.ValueTypeWord, rang.ValueSrcRType = r.convRTTI(pos) + } + + rang.Body = r.blockStmt() + r.closeAnotherScope() + return rang } pos := r.pos() init := r.stmt() - cond := r.expr() + cond := r.optExpr() post := r.stmt() body := r.blockStmt() r.closeAnotherScope() @@ -1419,6 +1659,44 @@ func (r *reader) selectStmt(label *types.Sym) ir.Node { comm := r.stmt() body := r.stmts() + // "case i = <-c: ..." may require an implicit conversion (e.g., + // see fixedbugs/bug312.go). Currently, typecheck throws away the + // implicit conversion and relies on it being reinserted later, + // but that would lose any explicit RTTI operands too. To preserve + // RTTI, we rewrite this as "case tmp := <-c: i = tmp; ...". 
+ if as, ok := comm.(*ir.AssignStmt); ok && as.Op() == ir.OAS && !as.Def { + if conv, ok := as.Y.(*ir.ConvExpr); ok && conv.Op() == ir.OCONVIFACE { + base.AssertfAt(conv.Implicit(), conv.Pos(), "expected implicit conversion: %v", conv) + + recv := conv.X + base.AssertfAt(recv.Op() == ir.ORECV, recv.Pos(), "expected receive expression: %v", recv) + + tmp := r.temp(pos, recv.Type()) + + // Replace comm with `tmp := <-c`. + tmpAs := ir.NewAssignStmt(pos, tmp, recv) + tmpAs.Def = true + tmpAs.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp)) + comm = tmpAs + + // Change original assignment to `i = tmp`, and prepend to body. + conv.X = tmp + body = append([]ir.Node{as}, body...) + } + } + + // multiExpr will have desugared a comma-ok receive expression + // into a separate statement. However, the rest of the compiler + // expects comm to be the OAS2RECV statement itself, so we need to + // shuffle things around to fit that pattern. + if as2, ok := comm.(*ir.AssignListStmt); ok && as2.Op() == ir.OAS2 { + init := ir.TakeInit(as2.Rhs[0]) + base.AssertfAt(len(init) == 1 && init[0].Op() == ir.OAS2RECV, as2.Pos(), "unexpected assignment: %+v", as2) + + comm = init[0] + body = append([]ir.Node{as2}, body...) + } + clauses[i] = ir.NewCommStmt(pos, comm, body) } if len(clauses) > 0 { @@ -1450,7 +1728,7 @@ func (r *reader) switchStmt(label *types.Sym) ir.Node { iface = x.Type() tag = ir.NewTypeSwitchGuard(pos, ident, x) } else { - tag = r.expr() + tag = r.optExpr() } clauses := make([]*ir.CaseClause, r.Len()) @@ -1461,20 +1739,43 @@ func (r *reader) switchStmt(label *types.Sym) ir.Node { r.openScope() pos := r.pos() - var cases []ir.Node + var cases, rtypes []ir.Node if iface != nil { cases = make([]ir.Node, r.Len()) if len(cases) == 0 { cases = nil // TODO(mdempsky): Unclear if this matters. } for i := range cases { - cases[i] = r.exprType(true) + if r.Bool() { // case nil + cases[i] = typecheck.Expr(types.BuiltinPkg.Lookup("nil").Def.(*ir.NilExpr)) + } else { + cases[i] = r.exprType() + } } } else { cases = r.exprList() + + // For `switch { case any(true): }` (e.g., issue 3980 in + // test/switch.go), the backend still creates a mixed bool/any + // comparison, and we need to explicitly supply the RTTI for the + // comparison. + // + // TODO(mdempsky): Change writer.go to desugar "switch {" into + // "switch true {", which we already handle correctly. + if tag == nil { + for i, cas := range cases { + if cas.Type().IsEmptyInterface() { + for len(rtypes) < i { + rtypes = append(rtypes, nil) + } + rtypes = append(rtypes, reflectdata.TypePtrAt(cas.Pos(), types.Types[types.TBOOL])) + } + } + } } clause := ir.NewCaseStmt(pos, cases, nil) + clause.RTypes = rtypes if ident != nil { pos := r.pos() @@ -1551,25 +1852,14 @@ func (r *reader) expr() (res ir.Node) { default: panic("unhandled expression") - case exprNone: - return nil - - case exprBlank: - // blank only allowed in LHS of assignments - // TODO(mdempsky): Handle directly in assignList instead? - return typecheck.AssignExpr(ir.BlankNode) - case exprLocal: return typecheck.Expr(r.useLocal()) - case exprName: + case exprGlobal: // Callee instead of Expr allows builtins // TODO(mdempsky): Handle builtins directly in exprCall, like method calls? 
return typecheck.Callee(r.obj()) - case exprType: - return r.exprType(false) - case exprConst: pos := r.pos() typ := r.typ() @@ -1578,6 +1868,11 @@ func (r *reader) expr() (res ir.Node) { orig := r.String() return typecheck.Expr(OrigConst(pos, typ, val, op, orig)) + case exprNil: + pos := r.pos() + typ := r.typ() + return Nil(pos, typ) + case exprCompLit: return r.compLit() @@ -1585,17 +1880,20 @@ func (r *reader) expr() (res ir.Node) { return r.funcLit() case exprSelector: - x := r.expr() - pos := r.pos() - _, sym := r.selector() + var x ir.Node + if r.Bool() { // MethodExpr + if r.Bool() { + return r.dict.methodExprs[r.Len()] + } - // Method expression with derived receiver type. - if x.Op() == ir.ODYNAMICTYPE { - // TODO(mdempsky): Handle with runtime dictionary lookup. - n := ir.TypeNode(x.Type()) + n := ir.TypeNode(r.typ()) n.SetTypecheck(1) x = n + } else { // FieldVal, MethodVal + x = r.expr() } + pos := r.pos() + _, sym := r.selector() n := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)).(*ir.SelectorExpr) if n.Op() == ir.OMETHVALUE { @@ -1615,14 +1913,20 @@ func (r *reader) expr() (res ir.Node) { x := r.expr() pos := r.pos() index := r.expr() - return typecheck.Expr(ir.NewIndexExpr(pos, x, index)) + n := typecheck.Expr(ir.NewIndexExpr(pos, x, index)) + switch n.Op() { + case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + n.RType = r.rtype(pos) + } + return n case exprSlice: x := r.expr() pos := r.pos() var index [3]ir.Node for i := range index { - index[i] = r.expr() + index[i] = r.optExpr() } op := ir.OSLICE if index[2] != nil { @@ -1633,10 +1937,13 @@ func (r *reader) expr() (res ir.Node) { case exprAssert: x := r.expr() pos := r.pos() - typ := r.exprType(false) + typ := r.exprType() + srcRType := r.rtype(pos) + // TODO(mdempsky): Always emit ODYNAMICDOTTYPE for uniformity? if typ, ok := typ.(*ir.DynamicType); ok && typ.Op() == ir.ODYNAMICTYPE { assert := ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, x, typ.RType) + assert.SrcRType = srcRType assert.ITab = typ.ITab return typed(typ.Type(), assert) } @@ -1675,13 +1982,43 @@ func (r *reader) expr() (res ir.Node) { fun = typecheck.Callee(ir.NewSelectorExpr(pos, ir.OXDOT, fun, sym)) } pos := r.pos() - args := r.exprs() + args := r.multiExpr() dots := r.Bool() - return typecheck.Call(pos, fun, args, dots) + n := typecheck.Call(pos, fun, args, dots) + switch n.Op() { + case ir.OAPPEND: + n := n.(*ir.CallExpr) + n.RType = r.rtype(pos) + case ir.OCOPY: + n := n.(*ir.BinaryExpr) + n.RType = r.rtype(pos) + case ir.ODELETE: + n := n.(*ir.CallExpr) + n.RType = r.rtype(pos) + case ir.OUNSAFESLICE: + n := n.(*ir.BinaryExpr) + n.RType = r.rtype(pos) + } + return n + + case exprMake: + pos := r.pos() + typ := r.exprType() + extra := r.exprs() + n := typecheck.Expr(ir.NewCallExpr(pos, ir.OMAKE, nil, append([]ir.Node{typ}, extra...))).(*ir.MakeExpr) + n.RType = r.rtype(pos) + return n + + case exprNew: + pos := r.pos() + typ := r.exprType() + return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, typ)) case exprConvert: + implicit := r.Bool() typ := r.typ() pos := r.pos() + typeWord, srcRType := r.convRTTI(pos) x := r.expr() // TODO(mdempsky): Stop constructing expressions of untyped type. 
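For illustration only (hypothetical source, not taken from the patch), the sketch below shows the N:1 assignment desugaring performed by the multiExpr helper added in the next hunk, which the inlining budget adjustment in inl.go above compensates for: multi-value results are first assigned to fresh temporaries so that any implicit interface conversions become explicit ConvExpr nodes that can carry their TypeWord/SrcRType operands.

package example

func f() (int, int) { return 1, 2 }

// sourceForm is the program as written: the first result of f is
// implicitly converted to any.
func sourceForm() (any, int) {
	var a any
	var b int
	a, b = f()
	return a, b
}

// desugaredForm is roughly the shape the unified IR reader builds
// instead (temporary names are illustrative), making the implicit
// conversion an explicit expression.
func desugaredForm() (any, int) {
	var a any
	var b int
	var tmp1, tmp2 int
	tmp1, tmp2 = f()
	a, b = any(tmp1), tmp2
	return a, b
}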
@@ -1697,10 +2034,74 @@ func (r *reader) expr() (res ir.Node) { base.ErrorExit() // harsh, but prevents constructing invalid IR } - return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, typ, x)) + n := ir.NewConvExpr(pos, ir.OCONV, typ, x) + n.TypeWord, n.SrcRType = typeWord, srcRType + if implicit { + n.SetImplicit(true) + } + return typecheck.Expr(n) } } +func (r *reader) optExpr() ir.Node { + if r.Bool() { + return r.expr() + } + return nil +} + +func (r *reader) multiExpr() []ir.Node { + r.Sync(pkgbits.SyncMultiExpr) + + if r.Bool() { // N:1 + pos := r.pos() + expr := r.expr() + + results := make([]ir.Node, r.Len()) + as := ir.NewAssignListStmt(pos, ir.OAS2, nil, []ir.Node{expr}) + as.Def = true + for i := range results { + tmp := r.temp(pos, r.typ()) + as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp)) + as.Lhs.Append(tmp) + + res := ir.Node(tmp) + if r.Bool() { + n := ir.NewConvExpr(pos, ir.OCONV, r.typ(), res) + n.TypeWord, n.SrcRType = r.convRTTI(pos) + n.SetImplicit(true) + res = typecheck.Expr(n) + } + results[i] = res + } + + // TODO(mdempsky): Could use ir.InlinedCallExpr instead? + results[0] = ir.InitExpr([]ir.Node{typecheck.Stmt(as)}, results[0]) + return results + } + + // N:N + exprs := make([]ir.Node, r.Len()) + if len(exprs) == 0 { + return nil + } + for i := range exprs { + exprs[i] = r.expr() + } + return exprs +} + +// temp returns a new autotemp of the specified type. +func (r *reader) temp(pos src.XPos, typ *types.Type) *ir.Name { + // See typecheck.typecheckargs. + curfn := r.curfn + if curfn == nil { + curfn = typecheck.InitTodoFunc + } + + return typecheck.TempAt(pos, curfn, typ) +} + func (r *reader) compLit() ir.Node { r.Sync(pkgbits.SyncCompLit) pos := r.pos() @@ -1713,6 +2114,10 @@ func (r *reader) compLit() ir.Node { if typ.Kind() == types.TFORW { base.FatalfAt(pos, "unresolved composite literal type: %v", typ) } + var rtype ir.Node + if typ.IsMap() { + rtype = r.rtype(pos) + } isStruct := typ.Kind() == types.TSTRUCT elems := make([]ir.Node, r.Len()) @@ -1731,6 +2136,10 @@ func (r *reader) compLit() ir.Node { } lit := typecheck.Expr(ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, elems)) + if rtype != nil { + lit := lit.(*ir.CompLitExpr) + lit.RType = rtype + } if typ0.IsPtr() { lit = typecheck.Expr(typecheck.NodAddrAt(pos, lit)) lit.SetType(typ0) @@ -1775,6 +2184,11 @@ func (r *reader) funcLit() ir.Node { for len(fn.ClosureVars) < cap(fn.ClosureVars) { ir.NewClosureVar(r.pos(), fn, r.useLocal()) } + if param := r.dictParam; param != nil { + // If we have a dictionary parameter, capture it too. For + // simplicity, we capture it last and unconditionally. + ir.NewClosureVar(param.Pos(), fn, param) + } r.addBody(fn) @@ -1799,17 +2213,73 @@ func (r *reader) exprs() []ir.Node { return nodes } -func (r *reader) exprType(nilOK bool) ir.Node { - r.Sync(pkgbits.SyncExprType) +// dictWord returns an expression to return the specified +// uintptr-typed word from the dictionary parameter. +func (r *reader) dictWord(pos src.XPos, idx int64) ir.Node { + base.AssertfAt(r.dictParam != nil, pos, "expected dictParam in %v", r.curfn) + return typecheck.Expr(ir.NewIndexExpr(pos, r.dictParam, ir.NewBasicLit(pos, constant.MakeInt64(idx)))) +} - if nilOK && r.Bool() { - return typecheck.Expr(types.BuiltinPkg.Lookup("nil").Def.(*ir.NilExpr)) +// rtype reads a type reference from the element bitstream, and +// returns an expression of type *runtime._type representing that +// type. 
+func (r *reader) rtype(pos src.XPos) ir.Node { + r.Sync(pkgbits.SyncRType) + return r.rtypeInfo(pos, r.typInfo()) +} + +// rtypeInfo returns an expression of type *runtime._type representing +// the given decoded type reference. +func (r *reader) rtypeInfo(pos src.XPos, info typeInfo) ir.Node { + if !info.derived { + typ := r.p.typIdx(info, r.dict, true) + return reflectdata.TypePtrAt(pos, typ) + } + return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TUINT8]), r.dictWord(pos, int64(info.idx)))) +} + +// convRTTI returns expressions appropriate for populating an +// ir.ConvExpr's TypeWord and SrcRType fields, respectively. +func (r *reader) convRTTI(pos src.XPos) (typeWord, srcRType ir.Node) { + r.Sync(pkgbits.SyncConvRTTI) + srcInfo := r.typInfo() + dstInfo := r.typInfo() + + dst := r.p.typIdx(dstInfo, r.dict, true) + if !dst.IsInterface() { + return } + src := r.p.typIdx(srcInfo, r.dict, true) + + // See reflectdata.ConvIfaceTypeWord. + switch { + case dst.IsEmptyInterface(): + if !src.IsInterface() { + typeWord = r.rtypeInfo(pos, srcInfo) // direct eface construction + } + case !src.IsInterface(): + typeWord = reflectdata.ITabAddrAt(pos, src, dst) // direct iface construction + default: + typeWord = r.rtypeInfo(pos, dstInfo) // convI2I + } + + // See reflectdata.ConvIfaceSrcRType. + if !src.IsInterface() { + srcRType = r.rtypeInfo(pos, srcInfo) + } + + return +} + +func (r *reader) exprType() ir.Node { + r.Sync(pkgbits.SyncExprType) + pos := r.pos() + setBasePos(pos) lsymPtr := func(lsym *obj.LSym) ir.Node { - return typecheck.Expr(typecheck.NodAddr(ir.NewLinksymExpr(pos, lsym, types.Types[types.TUINT8]))) + return typecheck.Expr(typecheck.NodAddrAt(pos, ir.NewLinksymExpr(pos, lsym, types.Types[types.TUINT8]))) } var typ *types.Type @@ -1836,7 +2306,7 @@ func (r *reader) exprType(nilOK bool) ir.Node { return n } - rtype = lsymPtr(reflectdata.TypeLinksym(typ)) + rtype = r.rtypeInfo(pos, info) } dt := ir.NewDynamicType(pos, rtype) @@ -1974,13 +2444,20 @@ func (r *reader) pkgObjs(target *ir.Package) []*ir.Name { var inlgen = 0 +// InlineCall implements inline.NewInline by re-reading the function +// body from its Unified IR export data. func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { // TODO(mdempsky): Turn callerfn into an explicit parameter. callerfn := ir.CurFunc - pri, ok := bodyReader[fn] + pri, ok := bodyReaderFor(fn) if !ok { - base.FatalfAt(call.Pos(), "missing function body for call to %v", fn) + // TODO(mdempsky): Reconsider this diagnostic's wording, if it's + // to be included in Go 1.20. + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos(), "cannot inline call to %v: missing inline body", fn) + } + return nil } if fn.Inl.Body == nil { @@ -2008,6 +2485,9 @@ func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExp for i, cv := range r.inlFunc.ClosureVars { r.closureVars[i] = cv.Outer } + if len(r.closureVars) != 0 && r.hasTypeParams() { + r.dictParam = r.closureVars[len(r.closureVars)-1] // dictParam is last; see reader.funcLit + } r.funcargs(fn) @@ -2070,8 +2550,12 @@ func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExp nparams := len(r.curfn.Dcl) ir.WithFunc(r.curfn, func() { - r.curfn.Body = r.stmts() - r.curfn.Endlineno = r.pos() + if r.shapedFn != nil { + r.callShaped(call.Pos()) + } else { + r.curfn.Body = r.stmts() + r.curfn.Endlineno = r.pos() + } // TODO(mdempsky): This shouldn't be necessary. 
Inlining might // read in new function/method declarations, which could @@ -2260,18 +2744,21 @@ func (r *reader) needWrapper(typ *types.Type) { } } +// importedDef reports whether r is reading from an imported and +// non-generic element. +// +// If a type was found in an imported package, then we can assume that +// package (or one of its transitive dependencies) already generated +// method wrappers for it. +// +// Exception: If we're instantiating an imported generic type or +// function, we might be instantiating it with type arguments not +// previously seen before. +// +// TODO(mdempsky): Distinguish when a generic function or type was +// instantiated in an imported package so that we can add types to +// haveWrapperTypes instead. func (r *reader) importedDef() bool { - // If a type was found in an imported package, then we can assume - // that package (or one of its transitive dependencies) already - // generated method wrappers for it. - // - // Exception: If we're instantiating an imported generic type or - // function, we might be instantiating it with type arguments not - // previously seen before. - // - // TODO(mdempsky): Distinguish when a generic function or type was - // instantiated in an imported package so that we can add types to - // haveWrapperTypes instead. return r.p != localPkgReader && !r.hasTypeParams() } @@ -2459,6 +2946,15 @@ func finishWrapperFunc(fn *ir.Func, target *ir.Package) { // so we're responsible for applying inlining ourselves here. inline.InlineCalls(fn) + // The body of wrapper function after inlining may reveal new ir.OMETHVALUE node, + // we don't know whether wrapper function has been generated for it or not, so + // generate one immediately here. + ir.VisitList(fn.Body, func(n ir.Node) { + if n, ok := n.(*ir.SelectorExpr); ok && n.Op() == ir.OMETHVALUE { + wrapMethodValue(n.X.Type(), n.Selection, target, true) + } + }) + target.Decls = append(target.Decls, fn) } @@ -2516,3 +3012,45 @@ func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) { ret.Results = []ir.Node{call} fn.Body.Append(ret) } + +func setBasePos(pos src.XPos) { + // Set the position for any error messages we might print (e.g. too large types). + base.Pos = pos +} + +// dictParamName is the name of the synthetic dictionary parameter +// added to shaped functions. +const dictParamName = ".dict" + +// shapeSig returns a copy of fn's signature, except adding a +// dictionary parameter and promoting the receiver parameter (if any) +// to a normal parameter. +// +// The parameter types.Fields are all copied too, so their Nname +// fields can be initialized for use by the shape function. 
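// For example (a sketch; the actual mangled names and dictionary
// layout are implementation details), a generic method such as
//
//	func (l *List[T]) PushBack(v T)
//
// gets a shaped variant whose signature is roughly
//
//	func PushBack(dict *[N]uintptr, l *List[go.shape.T], v go.shape.T)
//
// with the dictionary prepended and the receiver demoted to an
// ordinary parameter.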
+func shapeSig(fn *ir.Func, dict *readerDict) *types.Type { + sig := fn.Nname.Type() + recv := sig.Recv() + nrecvs := 0 + if recv != nil { + nrecvs++ + } + + params := make([]*types.Field, 1+nrecvs+sig.Params().Fields().Len()) + params[0] = types.NewField(fn.Pos(), fn.Sym().Pkg.Lookup(dictParamName), types.NewPtr(dict.varType())) + if recv != nil { + params[1] = types.NewField(recv.Pos, recv.Sym, recv.Type) + } + for i, param := range sig.Params().Fields().Slice() { + d := types.NewField(param.Pos, param.Sym, param.Type) + d.SetIsDDD(param.IsDDD()) + params[1+nrecvs+i] = d + } + + results := make([]*types.Field, sig.Results().Fields().Len()) + for i, result := range sig.Results().Fields().Slice() { + results[i] = types.NewField(result.Pos, result.Sym, result.Type) + } + + return types.NewSignature(types.LocalPkg, nil, nil, params, results) +} diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go index 46acdab79e1..1ded3673833 100644 --- a/src/cmd/compile/internal/noder/unified.go +++ b/src/cmd/compile/internal/noder/unified.go @@ -1,5 +1,3 @@ -// UNREVIEWED - // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -82,13 +80,12 @@ func unified(noders []*noder) { base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version) types.ParseLangFlag() - types.LocalPkg.Height = 0 // reset so pkgReader.pkgIdx doesn't complain target := typecheck.Target typecheck.TypecheckAllowed = true localPkgReader = newPkgReader(pkgbits.NewPkgDecoder(types.LocalPkg.Path, data)) - readPackage(localPkgReader, types.LocalPkg) + readPackage(localPkgReader, types.LocalPkg, true) r := localPkgReader.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate) r.pkgInit(types.LocalPkg, target) @@ -164,7 +161,7 @@ func writePkgStub(noders []*noder) string { { w := publicRootWriter w.pkg(pkg) - w.Bool(false) // has init; XXX + w.Bool(false) // TODO(mdempsky): Remove; was "has init" scope := pkg.Scope() names := scope.Names() @@ -227,42 +224,76 @@ func freePackage(pkg *types2.Package) { base.Fatalf("package never finalized") } -func readPackage(pr *pkgReader, importpkg *types.Pkg) { - r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) +// readPackage reads package export data from pr to populate +// importpkg. +// +// localStub indicates whether pr is reading the stub export data for +// the local package, as opposed to relocated export data for an +// import. 
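// Sketch of the effect (the package path and names are hypothetical):
// for an imported package p = "example.com/p" with an exported,
// inlinable function F, the public root registers the object and the
// private root registers its body, roughly
//
//	objReader[p.Lookup("F")]        = pkgReaderIndex{pr, objIdx, nil, nil}
//	importBodyReader[p.Lookup("F")] = pkgReaderIndex{pr, bodyIdx, nil, nil}
//
// so that InlineCall can later re-read F's body at its call sites.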
+func readPackage(pr *pkgReader, importpkg *types.Pkg, localStub bool) { + { + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) - pkg := r.pkg() - base.Assertf(pkg == importpkg, "have package %q (%p), want package %q (%p)", pkg.Path, pkg, importpkg.Path, importpkg) + pkg := r.pkg() + base.Assertf(pkg == importpkg, "have package %q (%p), want package %q (%p)", pkg.Path, pkg, importpkg.Path, importpkg) - if r.Bool() { - sym := pkg.Lookup(".inittask") - task := ir.NewNameAt(src.NoXPos, sym) - task.Class = ir.PEXTERN - sym.Def = task + r.Bool() // TODO(mdempsky): Remove; was "has init" + + for i, n := 0, r.Len(); i < n; i++ { + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + idx := r.Reloc(pkgbits.RelocObj) + assert(r.Len() == 0) + + path, name, code := r.p.PeekObj(idx) + if code != pkgbits.ObjStub { + objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil, nil} + } + } + + r.Sync(pkgbits.SyncEOF) } - for i, n := 0, r.Len(); i < n; i++ { - r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) - idx := r.Reloc(pkgbits.RelocObj) - assert(r.Len() == 0) + if !localStub { + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate) - path, name, code := r.p.PeekObj(idx) - if code != pkgbits.ObjStub { - objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil} + if r.Bool() { + sym := importpkg.Lookup(".inittask") + task := ir.NewNameAt(src.NoXPos, sym) + task.Class = ir.PEXTERN + sym.Def = task } + + for i, n := 0, r.Len(); i < n; i++ { + path := r.String() + name := r.String() + idx := r.Reloc(pkgbits.RelocBody) + + sym := types.NewPkg(path, "").Lookup(name) + if _, ok := importBodyReader[sym]; !ok { + importBodyReader[sym] = pkgReaderIndex{pr, idx, nil, nil} + } + } + + r.Sync(pkgbits.SyncEOF) } } +// writeUnifiedExport writes to `out` the finalized, self-contained +// Unified IR export data file for the current compilation unit. 
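// Roughly, the private root element written below has this layout (a
// sketch of the ordering, not a stable format specification):
//
//	Bool    whether the package has an ".inittask"
//	Len     number of exported function bodies
//	        { String pkgPath; String name; Reloc bodyIdx }  (repeated,
//	        sorted by body index)
//	Sync    EOF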
func writeUnifiedExport(out io.Writer) { l := linker{ pw: pkgbits.NewPkgEncoder(base.Debug.SyncFrames), - pkgs: make(map[string]pkgbits.Index), - decls: make(map[*types.Sym]pkgbits.Index), + pkgs: make(map[string]pkgbits.Index), + decls: make(map[*types.Sym]pkgbits.Index), + bodies: make(map[*types.Sym]pkgbits.Index), } publicRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic) + privateRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPrivate) assert(publicRootWriter.Idx == pkgbits.PublicRootIdx) + assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx) var selfPkgIdx pkgbits.Index @@ -273,7 +304,7 @@ func writeUnifiedExport(out io.Writer) { r.Sync(pkgbits.SyncPkg) selfPkgIdx = l.relocIdx(pr, pkgbits.RelocPkg, r.Reloc(pkgbits.RelocPkg)) - r.Bool() // has init + r.Bool() // TODO(mdempsky): Remove; was "has init" for i, n := 0, r.Len(); i < n; i++ { r.Sync(pkgbits.SyncObject) @@ -304,8 +335,7 @@ func writeUnifiedExport(out io.Writer) { w.Sync(pkgbits.SyncPkg) w.Reloc(pkgbits.RelocPkg, selfPkgIdx) - - w.Bool(typecheck.Lookup(".inittask").Def != nil) + w.Bool(false) // TODO(mdempsky): Remove; was "has init" w.Len(len(idxs)) for _, idx := range idxs { @@ -319,5 +349,31 @@ func writeUnifiedExport(out io.Writer) { w.Flush() } + { + type symIdx struct { + sym *types.Sym + idx pkgbits.Index + } + var bodies []symIdx + for sym, idx := range l.bodies { + bodies = append(bodies, symIdx{sym, idx}) + } + sort.Slice(bodies, func(i, j int) bool { return bodies[i].idx < bodies[j].idx }) + + w := privateRootWriter + + w.Bool(typecheck.Lookup(".inittask").Def != nil) + + w.Len(len(bodies)) + for _, body := range bodies { + w.String(body.sym.Pkg.Path) + w.String(body.sym.Name) + w.Reloc(pkgbits.RelocBody, body.idx) + } + + w.Sync(pkgbits.SyncEOF) + w.Flush() + } + base.Ctxt.Fingerprint = l.pw.DumpTo(out) } diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go index b440ad3a1ee..5f8767bf833 100644 --- a/src/cmd/compile/internal/noder/writer.go +++ b/src/cmd/compile/internal/noder/writer.go @@ -1,5 +1,3 @@ -// UNREVIEWED - // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -16,6 +14,45 @@ import ( "cmd/compile/internal/types2" ) +// This file implements the Unified IR package writer and defines the +// Unified IR export data format. +// +// Low-level coding details (e.g., byte-encoding of individual +// primitive values, or handling element bitstreams and +// cross-references) are handled by internal/pkgbits, so here we only +// concern ourselves with higher-level worries like mapping Go +// language constructs into elements. + +// There are two central types in the writing process: the "writer" +// type handles writing out individual elements, while the "pkgWriter" +// type keeps track of which elements have already been created. +// +// For each sort of "thing" (e.g., position, package, object, type) +// that can be written into the export data, there are generally +// several methods that work together: +// +// - writer.thing handles writing out a *use* of a thing, which often +// means writing a relocation to that thing's encoded index. +// +// - pkgWriter.thingIdx handles reserving an index for a thing, and +// writing out any elements needed for the thing. 
+// +// - writer.doThing handles writing out the *definition* of a thing, +// which in general is a mix of low-level coding primitives (e.g., +// ints and strings) or uses of other things. +// +// A design goal of Unified IR is to have a single, canonical writer +// implementation, but multiple reader implementations each tailored +// to their respective needs. For example, within cmd/compile's own +// backend, inlining is implemented largely by just re-running the +// function body reading code. + +// TODO(mdempsky): Add an importer for Unified IR to the x/tools repo, +// and better document the file format boundary between public and +// private data. + +// A pkgWriter constructs Unified IR export data from the results of +// running the types2 type checker on a Go compilation unit. type pkgWriter struct { pkgbits.PkgEncoder @@ -23,18 +60,29 @@ type pkgWriter struct { curpkg *types2.Package info *types2.Info + // Indices for previously written syntax and types2 things. + posBasesIdx map[*syntax.PosBase]pkgbits.Index pkgsIdx map[*types2.Package]pkgbits.Index typsIdx map[types2.Type]pkgbits.Index - globalsIdx map[types2.Object]pkgbits.Index + objsIdx map[types2.Object]pkgbits.Index + + // Maps from types2.Objects back to their syntax.Decl. funDecls map[*types2.Func]*syntax.FuncDecl typDecls map[*types2.TypeName]typeDeclGen - linknames map[types2.Object]string + // linknames maps package-scope objects to their linker symbol name, + // if specified by a //go:linkname directive. + linknames map[types2.Object]string + + // cgoPragmas accumulates any //go:cgo_* pragmas that need to be + // passed through to cmd/link. cgoPragmas [][]string } +// newPkgWriter returns an initialized pkgWriter for the specified +// package. func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter { return &pkgWriter{ PkgEncoder: pkgbits.NewPkgEncoder(base.Debug.SyncFrames), @@ -43,9 +91,9 @@ func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter { curpkg: pkg, info: info, - pkgsIdx: make(map[*types2.Package]pkgbits.Index), - globalsIdx: make(map[types2.Object]pkgbits.Index), - typsIdx: make(map[types2.Type]pkgbits.Index), + pkgsIdx: make(map[*types2.Package]pkgbits.Index), + objsIdx: make(map[types2.Object]pkgbits.Index), + typsIdx: make(map[types2.Type]pkgbits.Index), posBasesIdx: make(map[*syntax.PosBase]pkgbits.Index), @@ -56,34 +104,60 @@ func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter { } } +// errorf reports a user error about thing p. func (pw *pkgWriter) errorf(p poser, msg string, args ...interface{}) { base.ErrorfAt(pw.m.pos(p), msg, args...) } +// fatalf reports an internal compiler error about thing p. func (pw *pkgWriter) fatalf(p poser, msg string, args ...interface{}) { base.FatalfAt(pw.m.pos(p), msg, args...) } +// unexpected reports a fatal error about a thing of unexpected +// dynamic type. func (pw *pkgWriter) unexpected(what string, p poser) { pw.fatalf(p, "unexpected %s: %v (%T)", what, p, p) } +// typeOf returns the Type of the given value expression. +func (pw *pkgWriter) typeOf(expr syntax.Expr) types2.Type { + tv, ok := pw.info.Types[expr] + if !ok { + pw.fatalf(expr, "missing Types entry: %v", syntax.String(expr)) + } + if !tv.IsValue() { + pw.fatalf(expr, "expected value: %v", syntax.String(expr)) + } + return tv.Type +} + +// A writer provides APIs for writing out an individual element. type writer struct { p *pkgWriter pkgbits.Encoder + // sig holds the signature for the current function body, if any. 
+ sig *types2.Signature + // TODO(mdempsky): We should be able to prune localsIdx whenever a // scope closes, and then maybe we can just use the same map for // storing the TypeParams too (as their TypeName instead). - // variables declared within this function + // localsIdx tracks any local variables declared within this + // function body. It's unused for writing out non-body things. localsIdx map[*types2.Var]int - closureVars []posObj - closureVarsIdx map[*types2.Var]int + // closureVars tracks any free variables that are referenced by this + // function body. It's unused for writing out non-body things. + closureVars []posVar + closureVarsIdx map[*types2.Var]int // index of previously seen free variables - dict *writerDict + dict *writerDict + + // derived tracks whether the type being written out references any + // type parameters. It's unused for writing non-type things. derived bool } @@ -107,6 +181,10 @@ type writerDict struct { // itabs lists itabs that are needed for dynamic type assertions // (including type switches). itabs []itabInfo + + // methodsExprs lists method expressions with derived-type receiver + // parameters. + methodExprs []methodExprInfo } // A derivedInfo represents a reference to an encoded generic Go type. @@ -128,16 +206,38 @@ type typeInfo struct { derived bool } +// An objInfo represents a reference to an encoded, instantiated (if +// applicable) Go object. type objInfo struct { idx pkgbits.Index // index for the generic function declaration explicits []typeInfo // info for the type arguments } +// An itabInfo represents a reference to an encoded itab entry (i.e., +// a non-empty interface type along with a concrete type that +// implements that interface). type itabInfo struct { typIdx pkgbits.Index // always a derived type index iface typeInfo // always a non-empty interface type } +// A methodExprInfo represents a reference to an encoded method +// expression, whose receiver parameter is a derived type. +type methodExprInfo struct { + recvIdx pkgbits.Index // always a derived type index + methodInfo selectorInfo +} + +// A selectorInfo represents a reference to an encoded field or method +// name (i.e., objects that can only be accessed using selector +// expressions). +type selectorInfo struct { + pkgIdx pkgbits.Index + nameIdx pkgbits.Index +} + +// anyDerived reports whether any of info's explicit type arguments +// are derived types. func (info objInfo) anyDerived() bool { for _, explicit := range info.explicits { if explicit.derived { @@ -147,6 +247,8 @@ func (info objInfo) anyDerived() bool { return false } +// equals reports whether info and other represent the same Go object +// (i.e., same base object and identical type arguments, if any). func (info objInfo) equals(other objInfo) bool { if info.idx != other.idx { return false @@ -169,6 +271,7 @@ func (pw *pkgWriter) newWriter(k pkgbits.RelocKind, marker pkgbits.SyncMarker) * // @@@ Positions +// pos writes the position of p into the element bitstream. func (w *writer) pos(p poser) { w.Sync(pkgbits.SyncPos) pos := p.Pos() @@ -178,17 +281,19 @@ func (w *writer) pos(p poser) { return } - // TODO(mdempsky): Delta encoding. Also, if there's a b-side, update - // its position base too (but not vice versa!). + // TODO(mdempsky): Delta encoding. w.posBase(pos.Base()) w.Uint(pos.Line()) w.Uint(pos.Col()) } +// posBase writes a reference to the given PosBase into the element +// bitstream. 
func (w *writer) posBase(b *syntax.PosBase) { w.Reloc(pkgbits.RelocPosBase, w.p.posBaseIdx(b)) } +// posBaseIdx returns the index for the given PosBase. func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) pkgbits.Index { if idx, ok := pw.posBasesIdx[b]; ok { return idx @@ -210,11 +315,18 @@ func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) pkgbits.Index { // @@@ Packages +// pkg writes a use of the given Package into the element bitstream. func (w *writer) pkg(pkg *types2.Package) { - w.Sync(pkgbits.SyncPkg) - w.Reloc(pkgbits.RelocPkg, w.p.pkgIdx(pkg)) + w.pkgRef(w.p.pkgIdx(pkg)) } +func (w *writer) pkgRef(idx pkgbits.Index) { + w.Sync(pkgbits.SyncPkg) + w.Reloc(pkgbits.RelocPkg, idx) +} + +// pkgIdx returns the index for the given package, adding it to the +// package export data if needed. func (pw *pkgWriter) pkgIdx(pkg *types2.Package) pkgbits.Index { if idx, ok := pw.pkgsIdx[pkg]; ok { return idx @@ -241,7 +353,6 @@ func (pw *pkgWriter) pkgIdx(pkg *types2.Package) pkgbits.Index { base.Assertf(path != "builtin" && path != "unsafe", "unexpected path for user-defined package: %q", path) w.String(path) w.String(pkg.Name()) - w.Len(pkg.Height()) w.Len(len(pkg.Imports())) for _, imp := range pkg.Imports() { @@ -254,12 +365,18 @@ func (pw *pkgWriter) pkgIdx(pkg *types2.Package) pkgbits.Index { // @@@ Types -var anyTypeName = types2.Universe.Lookup("any").(*types2.TypeName) +var ( + anyTypeName = types2.Universe.Lookup("any").(*types2.TypeName) + runeTypeName = types2.Universe.Lookup("rune").(*types2.TypeName) +) +// typ writes a use of the given type into the bitstream. func (w *writer) typ(typ types2.Type) { w.typInfo(w.p.typIdx(typ, w.dict)) } +// typInfo writes a use of the given type (specified as a typeInfo +// instead) into the bitstream. func (w *writer) typInfo(info typeInfo) { w.Sync(pkgbits.SyncType) if w.Bool(info.derived) { @@ -468,6 +585,11 @@ func (w *writer) param(param *types2.Var) { // @@@ Objects +// obj writes a use of the given object into the bitstream. +// +// If obj is a generic object, then explicits are the explicit type +// arguments used to instantiate it (i.e., used to substitute the +// object's own declared type parameters). func (w *writer) obj(obj types2.Object, explicits *types2.TypeList) { explicitInfos := make([]typeInfo, explicits.Len()) for i := range explicitInfos { @@ -515,8 +637,13 @@ func (w *writer) obj(obj types2.Object, explicits *types2.TypeList) { } } +// objIdx returns the index for the given Object, adding it to the +// export data as needed. func (pw *pkgWriter) objIdx(obj types2.Object) pkgbits.Index { - if idx, ok := pw.globalsIdx[obj]; ok { + // TODO(mdempsky): Validate that obj is a global object (or a local + // defined type, which we hoist to global scope anyway). + + if idx, ok := pw.objsIdx[obj]; ok { return idx } @@ -530,12 +657,35 @@ func (pw *pkgWriter) objIdx(obj types2.Object) pkgbits.Index { dict.implicits = decl.implicits } + // We encode objects into 4 elements across different sections, all + // sharing the same index: + // + // - RelocName has just the object's qualified name (i.e., + // Object.Pkg and Object.Name) and the CodeObj indicating what + // specific type of Object it is (Var, Func, etc). + // + // - RelocObj has the remaining public details about the object, + // relevant to go/types importers. + // + // - RelocObjExt has additional private details about the object, + // which are only relevant to cmd/compile itself. 
This is + // separated from RelocObj so that go/types importers are + // unaffected by internal compiler changes. + // + // - RelocObjDict has public details about the object's type + // parameters and derived type's used by the object. This is + // separated to facilitate the eventual introduction of + // shape-based stenciling. + // + // TODO(mdempsky): Re-evaluate whether RelocName still makes sense + // to keep separate from RelocObj. + w := pw.newWriter(pkgbits.RelocObj, pkgbits.SyncObject1) wext := pw.newWriter(pkgbits.RelocObjExt, pkgbits.SyncObject1) wname := pw.newWriter(pkgbits.RelocName, pkgbits.SyncObject1) wdict := pw.newWriter(pkgbits.RelocObjDict, pkgbits.SyncObject1) - pw.globalsIdx[obj] = w.Idx // break cycles + pw.objsIdx[obj] = w.Idx // break cycles assert(wext.Idx == w.Idx) assert(wname.Idx == w.Idx) assert(wdict.Idx == w.Idx) @@ -557,6 +707,8 @@ func (pw *pkgWriter) objIdx(obj types2.Object) pkgbits.Index { return w.Idx } +// doObj writes the RelocObj definition for obj to w, and the +// RelocObjExt definition to wext. func (w *writer) doObj(wext *writer, obj types2.Object) pkgbits.CodeObj { if obj.Pkg() != w.p.curpkg { return pkgbits.ObjStub @@ -619,6 +771,8 @@ func (w *writer) doObj(wext *writer, obj types2.Object) pkgbits.CodeObj { } // typExpr writes the type represented by the given expression. +// +// TODO(mdempsky): Document how this differs from exprType. func (w *writer) typExpr(expr syntax.Expr) { tv, ok := w.p.info.Types[expr] assert(ok) @@ -667,6 +821,12 @@ func (w *writer) objDict(obj types2.Object, dict *writerDict) { w.typInfo(itab.iface) } + w.Len(len(dict.methodExprs)) + for _, methodExpr := range dict.methodExprs { + w.Len(int(methodExpr.recvIdx)) + w.selectorInfo(methodExpr.methodInfo) + } + assert(len(dict.derived) == nderived) assert(len(dict.funcs) == nfuncs) } @@ -724,8 +884,8 @@ func (w *writer) qualifiedIdent(obj types2.Object) { // me a little nervous to try it again. // localIdent writes the name of a locally declared object (i.e., -// objects that can only be accessed by name, within the context of a -// particular function). +// objects that can only be accessed by non-qualified name, within the +// context of a particular function). func (w *writer) localIdent(obj types2.Object) { assert(!isGlobal(obj)) w.Sync(pkgbits.SyncLocalIdent) @@ -736,9 +896,19 @@ func (w *writer) localIdent(obj types2.Object) { // selector writes the name of a field or method (i.e., objects that // can only be accessed using selector expressions). 
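// For example (a sketch), in a generic function like
//
//	func Describe[T fmt.Stringer](x T) string { return T.String(x) }
//
// the receiver type T is derived, so the method expression T.String is
// recorded in the dictionary's methodExprs as a derived receiver index
// plus a selectorInfo (package and name indices) naming String.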
func (w *writer) selector(obj types2.Object) { + w.selectorInfo(w.p.selectorIdx(obj)) +} + +func (w *writer) selectorInfo(info selectorInfo) { w.Sync(pkgbits.SyncSelector) - w.pkg(obj.Pkg()) - w.String(obj.Name()) + w.pkgRef(info.pkgIdx) + w.StringRef(info.nameIdx) +} + +func (pw *pkgWriter) selectorIdx(obj types2.Object) selectorInfo { + pkgIdx := pw.pkgIdx(obj.Pkg()) + nameIdx := pw.StringIdx(obj.Name()) + return selectorInfo{pkgIdx: pkgIdx, nameIdx: nameIdx} } // @@@ Compiler extensions @@ -787,7 +957,7 @@ func (w *writer) funcExt(obj *types2.Func) { } sig, block := obj.Type().(*types2.Signature), decl.Body - body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, block, w.dict) + body, closureVars := w.p.bodyIdx(sig, block, w.dict) assert(len(closureVars) == 0) w.Sync(pkgbits.SyncFuncExt) @@ -829,8 +999,11 @@ func (w *writer) pragmaFlag(p ir.PragmaFlag) { // @@@ Function bodies -func (pw *pkgWriter) bodyIdx(pkg *types2.Package, sig *types2.Signature, block *syntax.BlockStmt, dict *writerDict) (idx pkgbits.Index, closureVars []posObj) { +// bodyIdx returns the index for the given function body (specified by +// block), adding it to the export data +func (pw *pkgWriter) bodyIdx(sig *types2.Signature, block *syntax.BlockStmt, dict *writerDict) (idx pkgbits.Index, closureVars []posVar) { w := pw.newWriter(pkgbits.RelocBody, pkgbits.SyncFuncBody) + w.sig = sig w.dict = dict w.funcargs(sig) @@ -862,10 +1035,11 @@ func (w *writer) funcarg(param *types2.Var, result bool) { } } +// addLocal records the declaration of a new local variable. func (w *writer) addLocal(obj *types2.Var) { w.Sync(pkgbits.SyncAddLocal) idx := len(w.localsIdx) - if pkgbits.EnableSync { + if w.p.SyncMarkers() { w.Int(idx) } if w.localsIdx == nil { @@ -874,6 +1048,8 @@ func (w *writer) addLocal(obj *types2.Var) { w.localsIdx[obj] = idx } +// useLocal writes a reference to the given local or free variable +// into the bitstream. func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) { w.Sync(pkgbits.SyncUseObjLocal) @@ -888,7 +1064,7 @@ func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) { w.closureVarsIdx = make(map[*types2.Var]int) } idx = len(w.closureVars) - w.closureVars = append(w.closureVars, posObj{pos, obj}) + w.closureVars = append(w.closureVars, posVar{pos, obj}) w.closureVarsIdx[obj] = idx } w.Len(idx) @@ -911,6 +1087,7 @@ func (w *writer) closeAnotherScope() { // @@@ Statements +// stmt writes the given statement into the function body bitstream. 
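// For example (a sketch), an assignment whose destinations need
// implicit conversions, such as
//
//	var x int
//	var y any
//	x, y = g()   // where g has type func() (int, int)
//
// is written as an N:1 assignment; the reader then materializes
// temporaries for g's results and wraps the second in an implicit
// conversion (with its conversion RTTI), roughly
//
//	tmp1, tmp2 := g()
//	x, y = tmp1, any(tmp2)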
func (w *writer) stmt(stmt syntax.Stmt) { var stmts []syntax.Stmt if stmt != nil { @@ -949,13 +1126,15 @@ func (w *writer) stmt1(stmt syntax.Stmt) { w.op(binOps[stmt.Op]) w.expr(stmt.Lhs) w.pos(stmt) - w.expr(stmt.Rhs) + + var typ types2.Type + if stmt.Op != syntax.Shl && stmt.Op != syntax.Shr { + typ = w.p.typeOf(stmt.Lhs) + } + w.implicitConvExpr(stmt, typ, stmt.Rhs) default: - w.Code(stmtAssign) - w.pos(stmt) - w.exprList(stmt.Rhs) - w.assignList(stmt.Lhs) + w.assignStmt(stmt, stmt.Lhs, stmt.Rhs) } case *syntax.BlockStmt: @@ -1000,17 +1179,24 @@ func (w *writer) stmt1(stmt syntax.Stmt) { case *syntax.ReturnStmt: w.Code(stmtReturn) w.pos(stmt) - w.exprList(stmt.Results) + + resultTypes := w.sig.Results() + dstType := func(i int) types2.Type { + return resultTypes.At(i).Type() + } + w.multiExpr(stmt, dstType, unpackListExpr(stmt.Results)) case *syntax.SelectStmt: w.Code(stmtSelect) w.selectStmt(stmt) case *syntax.SendStmt: + chanType := types2.CoreType(w.p.typeOf(stmt.Chan)).(*types2.Chan) + w.Code(stmtSend) w.pos(stmt) w.expr(stmt.Chan) - w.expr(stmt.Value) + w.implicitConvExpr(stmt, chanType.Elem(), stmt.Value) case *syntax.SwitchStmt: w.Code(stmtSwitch) @@ -1023,25 +1209,36 @@ func (w *writer) assignList(expr syntax.Expr) { w.Len(len(exprs)) for _, expr := range exprs { - if name, ok := expr.(*syntax.Name); ok && name.Value != "_" { - if obj, ok := w.p.info.Defs[name]; ok { - obj := obj.(*types2.Var) + w.assign(expr) + } +} - w.Bool(true) - w.pos(obj) - w.localIdent(obj) - w.typ(obj.Type()) +func (w *writer) assign(expr syntax.Expr) { + expr = unparen(expr) - // TODO(mdempsky): Minimize locals index size by deferring - // this until the variables actually come into scope. - w.addLocal(obj) - continue - } + if name, ok := expr.(*syntax.Name); ok { + if name.Value == "_" { + w.Code(assignBlank) + return } - w.Bool(false) - w.expr(expr) + if obj, ok := w.p.info.Defs[name]; ok { + obj := obj.(*types2.Var) + + w.Code(assignDef) + w.pos(obj) + w.localIdent(obj) + w.typ(obj.Type()) + + // TODO(mdempsky): Minimize locals index size by deferring + // this until the variables actually come into scope. + w.addLocal(obj) + return + } } + + w.Code(assignExpr) + w.expr(expr) } func (w *writer) declStmt(decl syntax.Decl) { @@ -1052,13 +1249,48 @@ func (w *writer) declStmt(decl syntax.Decl) { case *syntax.ConstDecl, *syntax.TypeDecl: case *syntax.VarDecl: - w.Code(stmtAssign) - w.pos(decl) - w.exprList(decl.Values) - w.assignList(namesAsExpr(decl.NameList)) + w.assignStmt(decl, namesAsExpr(decl.NameList), decl.Values) } } +// assignStmt writes out an assignment for "lhs = rhs". +func (w *writer) assignStmt(pos poser, lhs0, rhs0 syntax.Expr) { + lhs := unpackListExpr(lhs0) + rhs := unpackListExpr(rhs0) + + w.Code(stmtAssign) + w.pos(pos) + + // As if w.assignList(lhs0). + w.Len(len(lhs)) + for _, expr := range lhs { + w.assign(expr) + } + + dstType := func(i int) types2.Type { + dst := lhs[i] + + // Finding dstType is somewhat involved, because for VarDecl + // statements, the Names are only added to the info.{Defs,Uses} + // maps, not to info.Types. 
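		// For example (a sketch), in
		//
		//	var y any = g()
		//
		// the name y appears only in info.Defs, so its type must be
		// taken from there rather than from typeOf.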
+ if name, ok := unparen(dst).(*syntax.Name); ok { + if name.Value == "_" { + return nil // ok: no implicit conversion + } else if def, ok := w.p.info.Defs[name].(*types2.Var); ok { + return def.Type() + } else if use, ok := w.p.info.Uses[name].(*types2.Var); ok { + return use.Type() + } else { + w.p.fatalf(dst, "cannot find type of destination object: %v", dst) + } + } + + return w.p.typeOf(dst) + } + + w.multiExpr(pos, dstType, rhs) +} + func (w *writer) blockStmt(stmt *syntax.BlockStmt) { w.Sync(pkgbits.SyncBlockStmt) w.openScope(stmt.Pos()) @@ -1072,12 +1304,45 @@ func (w *writer) forStmt(stmt *syntax.ForStmt) { if rang, ok := stmt.Init.(*syntax.RangeClause); w.Bool(ok) { w.pos(rang) - w.expr(rang.X) w.assignList(rang.Lhs) + w.expr(rang.X) + + xtyp := w.p.typeOf(rang.X) + if _, isMap := types2.CoreType(xtyp).(*types2.Map); isMap { + w.rtype(xtyp) + } + { + lhs := unpackListExpr(rang.Lhs) + assign := func(i int, src types2.Type) { + if i >= len(lhs) { + return + } + dst := unparen(lhs[i]) + if name, ok := dst.(*syntax.Name); ok && name.Value == "_" { + return + } + + var dstType types2.Type + if rang.Def { + // For `:=` assignments, the LHS names only appear in Defs, + // not Types (as used by typeOf). + dstType = w.p.info.Defs[dst.(*syntax.Name)].(*types2.Var).Type() + } else { + dstType = w.p.typeOf(dst) + } + + w.convRTTI(src, dstType) + } + + keyType, valueType := w.p.rangeTypes(rang.X) + assign(0, keyType) + assign(1, valueType) + } + } else { w.pos(stmt) w.stmt(stmt.Init) - w.expr(stmt.Cond) + w.optExpr(stmt.Cond) w.stmt(stmt.Post) } @@ -1085,6 +1350,30 @@ func (w *writer) forStmt(stmt *syntax.ForStmt) { w.closeAnotherScope() } +// rangeTypes returns the types of values produced by ranging over +// expr. +func (pw *pkgWriter) rangeTypes(expr syntax.Expr) (key, value types2.Type) { + typ := pw.typeOf(expr) + switch typ := types2.CoreType(typ).(type) { + case *types2.Pointer: // must be pointer to array + return types2.Typ[types2.Int], types2.CoreType(typ.Elem()).(*types2.Array).Elem() + case *types2.Array: + return types2.Typ[types2.Int], typ.Elem() + case *types2.Slice: + return types2.Typ[types2.Int], typ.Elem() + case *types2.Basic: + if typ.Info()&types2.IsString != 0 { + return types2.Typ[types2.Int], runeTypeName.Type() + } + case *types2.Map: + return typ.Key(), typ.Elem() + case *types2.Chan: + return typ.Elem(), nil + } + pw.fatalf(expr, "unexpected range type: %v", typ) + panic("unreachable") +} + func (w *writer) ifStmt(stmt *syntax.IfStmt) { w.Sync(pkgbits.SyncIfStmt) w.openScope(stmt.Pos()) @@ -1123,11 +1412,9 @@ func (w *writer) switchStmt(stmt *syntax.SwitchStmt) { w.pos(stmt) w.stmt(stmt.Init) - var iface types2.Type + var iface, tagType types2.Type if guard, ok := stmt.Tag.(*syntax.TypeSwitchGuard); w.Bool(ok) { - tv, ok := w.p.info.Types[guard.X] - assert(ok && tv.IsValue()) - iface = tv.Type + iface = w.p.typeOf(guard.X) w.pos(guard) if tag := guard.Lhs; w.Bool(tag != nil) { @@ -1136,7 +1423,32 @@ func (w *writer) switchStmt(stmt *syntax.SwitchStmt) { } w.expr(guard.X) } else { - w.expr(stmt.Tag) + tag := stmt.Tag + + if tag != nil { + tagType = w.p.typeOf(tag) + } else { + tagType = types2.Typ[types2.Bool] + } + + // Walk is going to emit comparisons between the tag value and + // each case expression, and we want these comparisons to always + // have the same type. If there are any case values that can't be + // converted to the tag value's type, then convert everything to + // `any` instead. 
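		// For example (a sketch):
		//
		//	var x int = f()
		//	var y any = g()
		//	switch x {
		//	case y, 42:
		//	}
		//
		// y's static type (any) is not assignable to int, so the tag
		// and every case value are implicitly converted to `any` before
		// comparison.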
+ Outer: + for _, clause := range stmt.Body { + for _, cas := range unpackListExpr(clause.Cases) { + if casType := w.p.typeOf(cas); !types2.AssignableTo(casType, tagType) { + tagType = types2.NewInterfaceType(nil, nil) + break Outer + } + } + } + + if w.Bool(tag != nil) { + w.implicitConvExpr(tag, tagType, tag) + } } w.Len(len(stmt.Body)) @@ -1148,14 +1460,25 @@ func (w *writer) switchStmt(stmt *syntax.SwitchStmt) { w.pos(clause) + cases := unpackListExpr(clause.Cases) if iface != nil { - cases := unpackListExpr(clause.Cases) w.Len(len(cases)) for _, cas := range cases { - w.exprType(iface, cas, true) + if w.Bool(isNil(w.p.info, cas)) { + continue + } + w.exprType(iface, cas) } } else { - w.exprList(clause.Cases) + // As if w.exprList(clause.Cases), + // but with implicit conversions to tagType. + + w.Sync(pkgbits.SyncExprList) + w.Sync(pkgbits.SyncExprs) + w.Len(len(cases)) + for _, cas := range cases { + w.implicitConvExpr(cas, tagType, cas) + } } if obj, ok := w.p.info.Implicits[clause]; ok { @@ -1200,30 +1523,26 @@ func (w *writer) optLabel(label *syntax.Name) { // @@@ Expressions +// expr writes the given expression into the function body bitstream. func (w *writer) expr(expr syntax.Expr) { + base.Assertf(expr != nil, "missing expression") + expr = unparen(expr) // skip parens; unneeded after typecheck obj, inst := lookupObj(w.p.info, expr) targs := inst.TypeArgs if tv, ok := w.p.info.Types[expr]; ok { - // TODO(mdempsky): Be more judicious about which types are marked as "needed". - if inst.Type != nil { - w.needType(inst.Type) - } else { - w.needType(tv.Type) - } - if tv.IsType() { - w.Code(exprType) - w.exprType(nil, expr, false) - return + w.p.fatalf(expr, "unexpected type expression %v", syntax.String(expr)) } if tv.Value != nil { w.Code(exprConst) w.pos(expr) - w.typ(tv.Type) + typ := idealType(tv) + assert(typ != nil) + w.typ(typ) w.Value(tv.Value) // TODO(mdempsky): These details are only important for backend @@ -1232,11 +1551,18 @@ func (w *writer) expr(expr syntax.Expr) { w.String(syntax.String(expr)) return } + + if _, isNil := obj.(*types2.Nil); isNil { + w.Code(exprNil) + w.pos(expr) + w.typ(tv.Type) + return + } } if obj != nil { if isGlobal(obj) { - w.Code(exprName) + w.Code(exprGlobal) w.obj(obj, targs) return } @@ -1254,13 +1580,6 @@ func (w *writer) expr(expr syntax.Expr) { default: w.p.unexpected("expression", expr) - case nil: // absent slice index, for condition, or switch tag - w.Code(exprNone) - - case *syntax.Name: - assert(expr.Value == "_") - w.Code(exprBlank) - case *syntax.CompositeLit: w.Code(exprCompLit) w.compLit(expr) @@ -1274,35 +1593,60 @@ func (w *writer) expr(expr syntax.Expr) { assert(ok) w.Code(exprSelector) - w.expr(expr.X) + if w.Bool(sel.Kind() == types2.MethodExpr) { + tv, ok := w.p.info.Types[expr.X] + assert(ok) + assert(tv.IsType()) + + typInfo := w.p.typIdx(tv.Type, w.dict) + if w.Bool(typInfo.derived) { + methodInfo := w.p.selectorIdx(sel.Obj()) + idx := w.dict.methodExprIdx(typInfo, methodInfo) + w.Len(idx) + break + } + + w.typInfo(typInfo) + } else { + w.expr(expr.X) + } w.pos(expr) w.selector(sel.Obj()) case *syntax.IndexExpr: - tv, ok := w.p.info.Types[expr.Index] - assert(ok && tv.IsValue()) + _ = w.p.typeOf(expr.Index) // ensure this is an index expression, not an instantiation + + xtyp := w.p.typeOf(expr.X) + + var keyType types2.Type + if mapType, ok := types2.CoreType(xtyp).(*types2.Map); ok { + keyType = mapType.Key() + } w.Code(exprIndex) w.expr(expr.X) w.pos(expr) - w.expr(expr.Index) + w.implicitConvExpr(expr, keyType, 
expr.Index) + if keyType != nil { + w.rtype(xtyp) + } case *syntax.SliceExpr: w.Code(exprSlice) w.expr(expr.X) w.pos(expr) for _, n := range &expr.Index { - w.expr(n) + w.optExpr(n) } case *syntax.AssertExpr: - tv, ok := w.p.info.Types[expr.X] - assert(ok && tv.IsValue()) + iface := w.p.typeOf(expr.X) w.Code(exprAssert) w.expr(expr.X) w.pos(expr) - w.exprType(tv.Type, expr.Type, false) + w.exprType(iface, expr.Type) + w.rtype(iface) case *syntax.Operation: if expr.Y == nil { @@ -1313,11 +1657,28 @@ func (w *writer) expr(expr syntax.Expr) { break } + var commonType types2.Type + switch expr.Op { + case syntax.Shl, syntax.Shr: + // ok: operands are allowed to have different types + default: + xtyp := w.p.typeOf(expr.X) + ytyp := w.p.typeOf(expr.Y) + switch { + case types2.AssignableTo(xtyp, ytyp): + commonType = ytyp + case types2.AssignableTo(ytyp, xtyp): + commonType = xtyp + default: + w.p.fatalf(expr, "failed to find common type between %v and %v", xtyp, ytyp) + } + } + w.Code(exprBinaryOp) w.op(binOps[expr.Op]) - w.expr(expr.X) + w.implicitConvExpr(expr, commonType, expr.X) w.pos(expr) - w.expr(expr.Y) + w.implicitConvExpr(expr, commonType, expr.Y) case *syntax.CallExpr: tv, ok := w.p.info.Types[expr.Fun] @@ -1327,12 +1688,68 @@ func (w *writer) expr(expr syntax.Expr) { assert(!expr.HasDots) w.Code(exprConvert) + w.Bool(false) // explicit w.typ(tv.Type) w.pos(expr) + w.convRTTI(w.p.typeOf(expr.ArgList[0]), tv.Type) w.expr(expr.ArgList[0]) break } + var rtype types2.Type + if tv.IsBuiltin() { + switch obj, _ := lookupObj(w.p.info, expr.Fun); obj.Name() { + case "make": + assert(len(expr.ArgList) >= 1) + assert(!expr.HasDots) + + w.Code(exprMake) + w.pos(expr) + w.exprType(nil, expr.ArgList[0]) + w.exprs(expr.ArgList[1:]) + + typ := w.p.typeOf(expr) + switch coreType := types2.CoreType(typ).(type) { + default: + w.p.fatalf(expr, "unexpected core type: %v", coreType) + case *types2.Chan: + w.rtype(typ) + case *types2.Map: + w.rtype(typ) + case *types2.Slice: + w.rtype(sliceElem(typ)) + } + + return + + case "new": + assert(len(expr.ArgList) == 1) + assert(!expr.HasDots) + + w.Code(exprNew) + w.pos(expr) + w.exprType(nil, expr.ArgList[0]) + return + + case "append": + rtype = sliceElem(w.p.typeOf(expr)) + case "copy": + typ := w.p.typeOf(expr.ArgList[0]) + if tuple, ok := typ.(*types2.Tuple); ok { // "copy(g())" + typ = tuple.At(0).Type() + } + rtype = sliceElem(typ) + case "delete": + typ := w.p.typeOf(expr.ArgList[0]) + if tuple, ok := typ.(*types2.Tuple); ok { // "delete(g())" + typ = tuple.At(0).Type() + } + rtype = typ + case "Slice": + rtype = sliceElem(w.p.typeOf(expr)) + } + } + writeFunExpr := func() { if selector, ok := unparen(expr.Fun).(*syntax.SelectorExpr); ok { if sel, ok := w.p.info.Selections[selector]; ok && sel.Kind() == types2.MethodVal { @@ -1348,59 +1765,157 @@ func (w *writer) expr(expr syntax.Expr) { w.Bool(false) // not a method call (i.e., normal function call) } + sigType := types2.CoreType(tv.Type).(*types2.Signature) + paramTypes := sigType.Params() + w.Code(exprCall) writeFunExpr() w.pos(expr) - w.exprs(expr.ArgList) + + paramType := func(i int) types2.Type { + if sigType.Variadic() && !expr.HasDots && i >= paramTypes.Len()-1 { + return paramTypes.At(paramTypes.Len() - 1).Type().(*types2.Slice).Elem() + } + return paramTypes.At(i).Type() + } + + w.multiExpr(expr, paramType, expr.ArgList) w.Bool(expr.HasDots) + if rtype != nil { + w.rtype(rtype) + } } } +func sliceElem(typ types2.Type) types2.Type { + return types2.CoreType(typ).(*types2.Slice).Elem() +} + +func 
(w *writer) optExpr(expr syntax.Expr) { + if w.Bool(expr != nil) { + w.expr(expr) + } +} + +// multiExpr writes a sequence of expressions, where the i'th value is +// implicitly converted to dstType(i). It also handles when exprs is a +// single, multi-valued expression (e.g., the multi-valued argument in +// an f(g()) call, or the RHS operand in a comma-ok assignment). +func (w *writer) multiExpr(pos poser, dstType func(int) types2.Type, exprs []syntax.Expr) { + w.Sync(pkgbits.SyncMultiExpr) + + if len(exprs) == 1 { + expr := exprs[0] + if tuple, ok := w.p.typeOf(expr).(*types2.Tuple); ok { + assert(tuple.Len() > 1) + w.Bool(true) // N:1 assignment + w.pos(pos) + w.expr(expr) + + w.Len(tuple.Len()) + for i := 0; i < tuple.Len(); i++ { + src := tuple.At(i).Type() + // TODO(mdempsky): Investigate not writing src here. I think + // the reader should be able to infer it from expr anyway. + w.typ(src) + if dst := dstType(i); w.Bool(dst != nil && !types2.Identical(src, dst)) { + if src == nil || dst == nil { + w.p.fatalf(pos, "src is %v, dst is %v", src, dst) + } + if !types2.AssignableTo(src, dst) { + w.p.fatalf(pos, "%v is not assignable to %v", src, dst) + } + w.typ(dst) + w.convRTTI(src, dst) + } + } + return + } + } + + w.Bool(false) // N:N assignment + w.Len(len(exprs)) + for i, expr := range exprs { + w.implicitConvExpr(pos, dstType(i), expr) + } +} + +// implicitConvExpr is like expr, but if dst is non-nil and different from +// expr's type, then an implicit conversion operation is inserted at +// pos. +func (w *writer) implicitConvExpr(pos poser, dst types2.Type, expr syntax.Expr) { + src := w.p.typeOf(expr) + if dst != nil && !types2.Identical(src, dst) { + if !types2.AssignableTo(src, dst) { + w.p.fatalf(pos, "%v is not assignable to %v", src, dst) + } + w.Code(exprConvert) + w.Bool(true) // implicit + w.typ(dst) + w.pos(pos) + w.convRTTI(src, dst) + // fallthrough + } + w.expr(expr) +} + func (w *writer) compLit(lit *syntax.CompositeLit) { - tv, ok := w.p.info.Types[lit] - assert(ok) + typ := w.p.typeOf(lit) w.Sync(pkgbits.SyncCompLit) w.pos(lit) - w.typ(tv.Type) + w.typ(typ) - typ := tv.Type if ptr, ok := types2.CoreType(typ).(*types2.Pointer); ok { typ = ptr.Elem() } - str, isStruct := types2.CoreType(typ).(*types2.Struct) + var keyType, elemType types2.Type + var structType *types2.Struct + switch typ0 := typ; typ := types2.CoreType(typ).(type) { + default: + w.p.fatalf(lit, "unexpected composite literal type: %v", typ) + case *types2.Array: + elemType = typ.Elem() + case *types2.Map: + w.rtype(typ0) + keyType, elemType = typ.Key(), typ.Elem() + case *types2.Slice: + elemType = typ.Elem() + case *types2.Struct: + structType = typ + } w.Len(len(lit.ElemList)) for i, elem := range lit.ElemList { - if isStruct { + elemType := elemType + if structType != nil { if kv, ok := elem.(*syntax.KeyValueExpr); ok { // use position of expr.Key rather than of elem (which has position of ':') w.pos(kv.Key) - w.Len(fieldIndex(w.p.info, str, kv.Key.(*syntax.Name))) + i = fieldIndex(w.p.info, structType, kv.Key.(*syntax.Name)) elem = kv.Value } else { w.pos(elem) - w.Len(i) } + elemType = structType.Field(i).Type() + w.Len(i) } else { if kv, ok := elem.(*syntax.KeyValueExpr); w.Bool(ok) { // use position of expr.Key rather than of elem (which has position of ':') w.pos(kv.Key) - w.expr(kv.Key) + w.implicitConvExpr(kv.Key, keyType, kv.Key) elem = kv.Value } } w.pos(elem) - w.expr(elem) + w.implicitConvExpr(elem, elemType, elem) } } func (w *writer) funcLit(expr *syntax.FuncLit) { - tv, ok := 
w.p.info.Types[expr] - assert(ok) - sig := tv.Type.(*types2.Signature) + sig := w.p.typeOf(expr).(*types2.Signature) - body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, expr.Body, w.dict) + body, closureVars := w.p.bodyIdx(sig, expr.Body, w.dict) w.Sync(pkgbits.SyncFuncLit) w.pos(expr) @@ -1409,15 +1924,15 @@ func (w *writer) funcLit(expr *syntax.FuncLit) { w.Len(len(closureVars)) for _, cv := range closureVars { w.pos(cv.pos) - w.useLocal(cv.pos, cv.obj) + w.useLocal(cv.pos, cv.var_) } w.Reloc(pkgbits.RelocBody, body) } -type posObj struct { - pos syntax.Pos - obj *types2.Var +type posVar struct { + pos syntax.Pos + var_ *types2.Var } func (w *writer) exprList(expr syntax.Expr) { @@ -1426,10 +1941,6 @@ func (w *writer) exprList(expr syntax.Expr) { } func (w *writer) exprs(exprs []syntax.Expr) { - if len(exprs) == 0 { - assert(exprs == nil) - } - w.Sync(pkgbits.SyncExprs) w.Len(len(exprs)) for _, expr := range exprs { @@ -1437,21 +1948,41 @@ func (w *writer) exprs(exprs []syntax.Expr) { } } -func (w *writer) exprType(iface types2.Type, typ syntax.Expr, nilOK bool) { +// rtype writes information so that the reader can construct an +// expression of type *runtime._type representing typ. +func (w *writer) rtype(typ types2.Type) { + w.Sync(pkgbits.SyncRType) + w.typNeeded(typ) +} + +// typNeeded writes a reference to typ, and records that its +// *runtime._type is needed. +func (w *writer) typNeeded(typ types2.Type) { + info := w.p.typIdx(typ, w.dict) + w.typInfo(info) + + if info.derived { + w.dict.derived[info.idx].needed = true + } +} + +// convRTTI writes information so that the reader can construct +// expressions for converting from src to dst. +func (w *writer) convRTTI(src, dst types2.Type) { + w.Sync(pkgbits.SyncConvRTTI) + w.typNeeded(src) + w.typNeeded(dst) +} + +func (w *writer) exprType(iface types2.Type, typ syntax.Expr) { base.Assertf(iface == nil || isInterface(iface), "%v must be nil or an interface type", iface) tv, ok := w.p.info.Types[typ] assert(ok) - - w.Sync(pkgbits.SyncExprType) - - if nilOK && w.Bool(tv.IsNil()) { - return - } - assert(tv.IsType()) info := w.p.typIdx(tv.Type, w.dict) + w.Sync(pkgbits.SyncExprType) w.pos(typ) if w.Bool(info.derived && iface != nil && !iface.Underlying().(*types2.Interface).Empty()) { @@ -1472,8 +2003,29 @@ func (w *writer) exprType(iface types2.Type, typ syntax.Expr, nilOK bool) { } w.typInfo(info) + if info.derived { + w.dict.derived[info.idx].needed = true + } } +func (dict *writerDict) methodExprIdx(recvInfo typeInfo, methodInfo selectorInfo) int { + assert(recvInfo.derived) + newInfo := methodExprInfo{recvIdx: recvInfo.idx, methodInfo: methodInfo} + + for idx, oldInfo := range dict.methodExprs { + if oldInfo == newInfo { + return idx + } + } + + idx := len(dict.methodExprs) + dict.methodExprs = append(dict.methodExprs, newInfo) + return idx +} + +// isInterface reports whether typ is known to be an interface type. +// If typ is a type parameter, then isInterface reports an internal +// compiler error instead. func isInterface(typ types2.Type) bool { if _, ok := typ.(*types2.TypeParam); ok { // typ is a type parameter and may be instantiated as either a @@ -1486,6 +2038,7 @@ func isInterface(typ types2.Type) bool { return ok } +// op writes an Op into the bitstream. func (w *writer) op(op ir.Op) { // TODO(mdempsky): Remove in favor of explicit codes? 
Would make // export data more stable against internal refactorings, but low @@ -1495,20 +2048,6 @@ func (w *writer) op(op ir.Op) { w.Len(int(op)) } -func (w *writer) needType(typ types2.Type) { - // Decompose tuple into component element types. - if typ, ok := typ.(*types2.Tuple); ok { - for i := 0; i < typ.Len(); i++ { - w.needType(typ.At(i).Type()) - } - return - } - - if info := w.p.typIdx(typ, w.dict); info.derived { - w.dict.derived[info.idx].needed = true - } -} - // @@@ Package initialization // Caution: This code is still clumsy, because toolstash -cmp is @@ -1526,6 +2065,12 @@ type fileImports struct { importedEmbed, importedUnsafe bool } +// declCollector is a visitor type that collects compiler-needed +// information about declarations that types2 doesn't track. +// +// Notably, it maps declared types and functions back to their +// declaration statement, keeps track of implicit type parameters, and +// assigns unique type "generation" numbers to local defined types. type declCollector struct { pw *pkgWriter typegen *int @@ -1649,10 +2194,7 @@ func (pw *pkgWriter) collectDecls(noders []*noder) { } default: - // TODO(mdempsky): Enable after #42938 is fixed. - if false { - pw.errorf(l.pos, "//go:linkname must refer to declared function or variable") - } + pw.errorf(l.pos, "//go:linkname must refer to declared function or variable") } } } @@ -1750,6 +2292,12 @@ func (w *writer) pkgDecl(decl syntax.Decl) { w.Code(declVar) w.pos(decl) w.pkgObjs(decl.NameList...) + + // TODO(mdempsky): It would make sense to use multiExpr here, but + // that results in IR that confuses pkginit/initorder.go. So we + // continue using exprList, and let typecheck handle inserting any + // implicit conversions. That's okay though, because package-scope + // assignments never require dictionaries. w.exprList(decl.Values) var embeds []pragmaEmbed @@ -1836,6 +2384,27 @@ func isPkgQual(info *types2.Info, sel *syntax.SelectorExpr) bool { return false } +// isMultiValueExpr reports whether expr is a function call expression +// that yields multiple values. +func isMultiValueExpr(info *types2.Info, expr syntax.Expr) bool { + tv, ok := info.Types[expr] + assert(ok) + assert(tv.IsValue()) + if tuple, ok := tv.Type.(*types2.Tuple); ok { + assert(tuple.Len() > 1) + return true + } + return false +} + +// isNil reports whether expr is a (possibly parenthesized) reference +// to the predeclared nil value. +func isNil(info *types2.Info, expr syntax.Expr) bool { + tv, ok := info.Types[expr] + assert(ok) + return tv.IsNil() +} + // recvBase returns the base type for the given receiver parameter. func recvBase(recv *types2.Var) *types2.Named { typ := recv.Type() diff --git a/src/cmd/compile/internal/reflectdata/helpers.go b/src/cmd/compile/internal/reflectdata/helpers.go new file mode 100644 index 00000000000..99461cff52b --- /dev/null +++ b/src/cmd/compile/internal/reflectdata/helpers.go @@ -0,0 +1,226 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflectdata + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +func hasRType(n, rtype ir.Node, fieldName string) bool { + if rtype != nil { + return true + } + + // We make an exception for `init`, because we still depend on + // pkginit for sorting package initialization statements, and it + // gets confused by implicit conversions. 
Also, package-scope
+	// statements can never be generic, so they'll never require
+	// dictionary lookups.
+	if base.Debug.Unified != 0 && ir.CurFunc.Nname.Sym().Name != "init" {
+		ir.Dump("CurFunc", ir.CurFunc)
+		base.FatalfAt(n.Pos(), "missing %s in %v: %+v", fieldName, ir.CurFunc, n)
+	}
+
+	return false
+}
+
+// assertOp asserts that n is an op.
+func assertOp(n ir.Node, op ir.Op) {
+	base.AssertfAt(n.Op() == op, n.Pos(), "want %v, have %v", op, n)
+}
+
+// assertOp2 asserts that n is an op1 or op2.
+func assertOp2(n ir.Node, op1, op2 ir.Op) {
+	base.AssertfAt(n.Op() == op1 || n.Op() == op2, n.Pos(), "want %v or %v, have %v", op1, op2, n)
+}
+
+// kindRType asserts that typ has the given kind, and returns an
+// expression that yields the *runtime._type value representing typ.
+func kindRType(pos src.XPos, typ *types.Type, k types.Kind) ir.Node {
+	base.AssertfAt(typ.Kind() == k, pos, "want %v type, have %v", k, typ)
+	return TypePtrAt(pos, typ)
+}
+
+// mapRType asserts that typ is a map type, and returns an expression
+// that yields the *runtime._type value representing typ.
+func mapRType(pos src.XPos, typ *types.Type) ir.Node {
+	return kindRType(pos, typ, types.TMAP)
+}
+
+// chanRType asserts that typ is a channel type, and returns an
+// expression that yields the *runtime._type value representing typ.
+func chanRType(pos src.XPos, typ *types.Type) ir.Node {
+	return kindRType(pos, typ, types.TCHAN)
+}
+
+// sliceElemRType asserts that typ is a slice type, and returns an
+// expression that yields the *runtime._type value representing typ's
+// element type.
+func sliceElemRType(pos src.XPos, typ *types.Type) ir.Node {
+	base.AssertfAt(typ.IsSlice(), pos, "want slice type, have %v", typ)
+	return TypePtrAt(pos, typ.Elem())
+}
+
+// concreteRType asserts that typ is not an interface type, and
+// returns an expression that yields the *runtime._type value
+// representing typ.
+func concreteRType(pos src.XPos, typ *types.Type) ir.Node {
+	base.AssertfAt(!typ.IsInterface(), pos, "want non-interface type, have %v", typ)
+	return TypePtrAt(pos, typ)
+}
+
+// AppendElemRType asserts that n is an "append" operation, and
+// returns an expression that yields the *runtime._type value
+// representing the result slice type's element type.
+func AppendElemRType(pos src.XPos, n *ir.CallExpr) ir.Node {
+	assertOp(n, ir.OAPPEND)
+	if hasRType(n, n.RType, "RType") {
+		return n.RType
+	}
+	return sliceElemRType(pos, n.Type())
+}
+
+// CompareRType asserts that n is a comparison (== or !=) operation
+// between expressions of interface and non-interface type, and
+// returns an expression that yields the *runtime._type value
+// representing the non-interface type.
+func CompareRType(pos src.XPos, n *ir.BinaryExpr) ir.Node {
+	assertOp2(n, ir.OEQ, ir.ONE)
+	base.AssertfAt(n.X.Type().IsInterface() != n.Y.Type().IsInterface(), n.Pos(), "expect mixed interface and non-interface, have %L and %L", n.X, n.Y)
+	if hasRType(n, n.RType, "RType") {
+		return n.RType
+	}
+	typ := n.X.Type()
+	if typ.IsInterface() {
+		typ = n.Y.Type()
+	}
+	return concreteRType(pos, typ)
+}
+
+// ConvIfaceTypeWord asserts that n is a conversion to an interface
+// type, and returns an expression that yields the *runtime._type or
+// *runtime.itab value necessary for implementing the conversion.
+// +// - *runtime._type for the destination type, for I2I conversions +// - *runtime.itab, for T2I conversions +// - *runtime._type for the source type, for T2E conversions +func ConvIfaceTypeWord(pos src.XPos, n *ir.ConvExpr) ir.Node { + assertOp(n, ir.OCONVIFACE) + src, dst := n.X.Type(), n.Type() + base.AssertfAt(dst.IsInterface(), n.Pos(), "want interface type, have %L", n) + if hasRType(n, n.TypeWord, "TypeWord") { + return n.TypeWord + } + if dst.IsEmptyInterface() { + return concreteRType(pos, src) // direct eface construction + } + if !src.IsInterface() { + return ITabAddrAt(pos, src, dst) // direct iface construction + } + return TypePtrAt(pos, dst) // convI2I +} + +// ConvIfaceSrcRType asserts that n is a conversion from +// non-interface type to interface type (or OCONVIDATA operation), and +// returns an expression that yields the *runtime._type for copying +// the convertee value to the heap. +func ConvIfaceSrcRType(pos src.XPos, n *ir.ConvExpr) ir.Node { + assertOp2(n, ir.OCONVIFACE, ir.OCONVIDATA) + if hasRType(n, n.SrcRType, "SrcRType") { + return n.SrcRType + } + return concreteRType(pos, n.X.Type()) +} + +// CopyElemRType asserts that n is a "copy" operation, and returns an +// expression that yields the *runtime._type value representing the +// destination slice type's element type. +func CopyElemRType(pos src.XPos, n *ir.BinaryExpr) ir.Node { + assertOp(n, ir.OCOPY) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return sliceElemRType(pos, n.X.Type()) +} + +// DeleteMapRType asserts that n is a "delete" operation, and returns +// an expression that yields the *runtime._type value representing the +// map type. +func DeleteMapRType(pos src.XPos, n *ir.CallExpr) ir.Node { + assertOp(n, ir.ODELETE) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return mapRType(pos, n.Args[0].Type()) +} + +// IndexMapRType asserts that n is a map index operation, and returns +// an expression that yields the *runtime._type value representing the +// map type. +func IndexMapRType(pos src.XPos, n *ir.IndexExpr) ir.Node { + assertOp(n, ir.OINDEXMAP) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return mapRType(pos, n.X.Type()) +} + +// MakeChanRType asserts that n is a "make" operation for a channel +// type, and returns an expression that yields the *runtime._type +// value representing that channel type. +func MakeChanRType(pos src.XPos, n *ir.MakeExpr) ir.Node { + assertOp(n, ir.OMAKECHAN) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return chanRType(pos, n.Type()) +} + +// MakeMapRType asserts that n is a "make" operation for a map type, +// and returns an expression that yields the *runtime._type value +// representing that map type. +func MakeMapRType(pos src.XPos, n *ir.MakeExpr) ir.Node { + assertOp(n, ir.OMAKEMAP) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return mapRType(pos, n.Type()) +} + +// MakeSliceElemRType asserts that n is a "make" operation for a slice +// type, and returns an expression that yields the *runtime._type +// value representing that slice type's element type. +func MakeSliceElemRType(pos src.XPos, n *ir.MakeExpr) ir.Node { + assertOp2(n, ir.OMAKESLICE, ir.OMAKESLICECOPY) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return sliceElemRType(pos, n.Type()) +} + +// RangeMapRType asserts that n is a "range" loop over a map value, +// and returns an expression that yields the *runtime._type value +// representing that map type. 
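// For example (a rough sketch of the walked form), a loop
//
//	for k, v := range m
//
// over a map is lowered to iterator calls such as runtime.mapiterinit,
// whose first argument is the map's *runtime._type; that argument is
// the expression returned here.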
+func RangeMapRType(pos src.XPos, n *ir.RangeStmt) ir.Node { + assertOp(n, ir.ORANGE) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return mapRType(pos, n.X.Type()) +} + +// UnsafeSliceElemRType asserts that n is an "unsafe.Slice" operation, +// and returns an expression that yields the *runtime._type value +// representing the result slice type's element type. +func UnsafeSliceElemRType(pos src.XPos, n *ir.BinaryExpr) ir.Node { + assertOp(n, ir.OUNSAFESLICE) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return sliceElemRType(pos, n.Type()) +} diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 21301ab1499..59085869ebc 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -842,9 +842,15 @@ func TypeLinksym(t *types.Type) *obj.LSym { return TypeSym(t).Linksym() } +// Deprecated: Use TypePtrAt instead. func TypePtr(t *types.Type) *ir.AddrExpr { - n := ir.NewLinksymExpr(base.Pos, TypeLinksym(t), types.Types[types.TUINT8]) - return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr) + return TypePtrAt(base.Pos, t) +} + +// TypePtrAt returns an expression that evaluates to the +// *runtime._type value for t. +func TypePtrAt(pos src.XPos, t *types.Type) *ir.AddrExpr { + return typecheck.LinksymAddr(pos, TypeLinksym(t), types.Types[types.TUINT8]) } // ITabLsym returns the LSym representing the itab for concrete type typ implementing @@ -864,9 +870,15 @@ func ITabLsym(typ, iface *types.Type) *obj.LSym { return lsym } -// ITabAddr returns an expression representing a pointer to the itab -// for concrete type typ implementing interface iface. +// Deprecated: Use ITabAddrAt instead. func ITabAddr(typ, iface *types.Type) *ir.AddrExpr { + return ITabAddrAt(base.Pos, typ, iface) +} + +// ITabAddrAt returns an expression that evaluates to the +// *runtime.itab value for concrete type typ implementing interface +// iface. 
+func ITabAddrAt(pos src.XPos, typ, iface *types.Type) *ir.AddrExpr { s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString()) lsym := s.Linksym() @@ -874,8 +886,7 @@ func ITabAddr(typ, iface *types.Type) *ir.AddrExpr { writeITab(lsym, typ, iface, false) } - n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8]) - return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr) + return typecheck.LinksymAddr(pos, lsym, types.Types[types.TUINT8]) } // needkeyupdate reports whether map updates with t as a key diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go index a76358967d1..1b564055d30 100644 --- a/src/cmd/compile/internal/ssa/debug_lines_test.go +++ b/src/cmd/compile/internal/ssa/debug_lines_test.go @@ -76,7 +76,7 @@ func TestDebugLinesPushback(t *testing.T) { fn := "(*List[go.shape.int_0]).PushBack" if buildcfg.Experiment.Unified { // Unified mangles differently - fn = "(*List[int]).PushBack" + fn = "(*List[int]).PushBack-shaped" } testDebugLines(t, "-N -l", "pushback.go", fn, []int{17, 18, 19, 20, 21, 22, 24}, true) } @@ -95,7 +95,7 @@ func TestDebugLinesConvert(t *testing.T) { fn := "G[go.shape.int_0]" if buildcfg.Experiment.Unified { // Unified mangles differently - fn = "G[int]" + fn = "G[int]-shaped" } testDebugLines(t, "-N -l", "convertline.go", fn, []int{9, 10, 11}, true) } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 2ee027092e9..1e5313f95e9 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -664,7 +664,7 @@ func (s *state) paramsToHeap() { // newHeapaddr allocates heap memory for n and sets its heap address. func (s *state) newHeapaddr(n *ir.Name) { - s.setHeapaddr(n.Pos(), n, s.newObject(n.Type())) + s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil)) } // setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil) @@ -692,23 +692,26 @@ func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) { } // newObject returns an SSA value denoting new(typ). -func (s *state) newObject(typ *types.Type) *ssa.Value { +func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value { if typ.Size() == 0 { return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb) } - return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0] + if rtype == nil { + rtype = s.reflectType(typ) + } + return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0] } func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) { if !n.Type().IsPtr() { s.Fatalf("expected pointer type: %v", n.Type()) } - elem := n.Type().Elem() + elem, rtypeExpr := n.Type().Elem(), n.ElemRType if count != nil { if !elem.IsArray() { s.Fatalf("expected array type: %v", elem) } - elem = elem.Elem() + elem, rtypeExpr = elem.Elem(), n.ElemElemRType } size := elem.Size() // Casting from larger type to smaller one is ok, so for smallest type, do nothing. 
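The checkPtrAlignment hunks here let the frontend hand the backend a precomputed element-type descriptor (the ElemRType/ElemElemRType expressions) instead of always deriving one through reflectType. As a rough, source-level sketch of the kind of unsafe.Pointer conversions that -d=checkptr instruments; the program and its variable names are invented for illustration and are not compiler code:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var raw [4]int32

	// unsafe.Pointer to *int32: under -d=checkptr the runtime verifies
	// alignment and bounds against the int32 element type, whose
	// descriptor the rtype plumbing above lets the frontend supply.
	p := (*int32)(unsafe.Pointer(&raw[0]))

	// unsafe.Pointer to *[2]int32: checked the same way, now against the
	// array's element type.
	arr := (*[2]int32)(unsafe.Pointer(&raw[0]))

	// unsafe.Slice gets a similar check (unsafeslicecheckptr).
	s := unsafe.Slice(&raw[0], 4)

	*p, arr[1], s[2] = 1, 2, 3
	fmt.Println(raw) // [1 2 3 0]
}

Each such conversion is rewritten to a runtime check call that takes the element's type descriptor, which is exactly the value the rtypeExpr fallback logic in the next hunk selects.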
@@ -721,12 +724,20 @@ func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value if count.Type.Size() != s.config.PtrSize { s.Fatalf("expected count fit to an uintptr size, have: %d, want: %d", count.Type.Size(), s.config.PtrSize) } - s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, s.reflectType(elem), count) + var rtype *ssa.Value + if rtypeExpr != nil { + rtype = s.expr(rtypeExpr) + } else { + rtype = s.reflectType(elem) + } + s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, rtype, count) } // reflectType returns an SSA value representing a pointer to typ's // reflection type descriptor. func (s *state) reflectType(typ *types.Type) *ssa.Value { + // TODO(mdempsky): Make this Fatalf under Unified IR; frontend needs + // to supply RType expressions. lsym := reflectdata.TypeLinksym(typ) return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb) } @@ -3294,7 +3305,11 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value { case ir.ONEW: n := n.(*ir.UnaryExpr) - return s.newObject(n.Type().Elem()) + var rtype *ssa.Value + if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE { + rtype = s.expr(x.RType) + } + return s.newObject(n.Type().Elem(), rtype) case ir.OUNSAFEADD: n := n.(*ir.BinaryExpr) @@ -6226,12 +6241,15 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if n.ITab != nil { targetItab = s.expr(n.ITab) } - return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok) + return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok) } func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) { iface := s.expr(n.X) - var target, targetItab *ssa.Value + var source, target, targetItab *ssa.Value + if n.SrcRType != nil { + source = s.expr(n.SrcRType) + } if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() { byteptr := s.f.Config.Types.BytePtr targetItab = s.expr(n.ITab) @@ -6241,15 +6259,16 @@ func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, } else { target = s.expr(n.RType) } - return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok) + return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok) } // dottype1 implements a x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T) // and src is the type we're asserting from. +// source is the *runtime._type of src // target is the *runtime._type of dst. // If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil. // commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails. 
-func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) { +func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) { byteptr := s.f.Config.Types.BytePtr if dst.IsInterface() { if dst.IsEmptyInterface() { @@ -6385,7 +6404,10 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targ if !commaok { // on failure, panic by calling panicdottype s.startBlock(bFail) - taddr := s.reflectType(src) + taddr := source + if taddr == nil { + taddr = s.reflectType(src) + } if src.IsEmptyInterface() { s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr) } else { diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index d5c4b8e1e84..43ec7b80a09 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -405,7 +405,7 @@ func (w *exportWriter) writeIndex(index map[*types.Sym]uint64, mainIndex bool) { w.string(exportPath(pkg)) if mainIndex { w.string(pkg.Name) - w.uint64(uint64(pkg.Height)) + w.uint64(0) // was package height, but not necessary anymore. } // Sort symbols within a package by name. @@ -1978,6 +1978,7 @@ func (w *exportWriter) expr(n ir.Node) { w.pos(n.Pos()) w.typ(n.Type()) w.expr(n.X) + w.bool(n.Implicit()) case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC: n := n.(*ir.UnaryExpr) diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 3a51f781f05..96aaac6362f 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -175,10 +175,9 @@ func ReadImports(pkg *types.Pkg, data string) { for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- { pkg := p.pkgAt(ird.uint64()) pkgName := p.stringAt(ird.uint64()) - pkgHeight := int(ird.uint64()) + _ = int(ird.uint64()) // was package height, but not necessary anymore. if pkg.Name == "" { pkg.Name = pkgName - pkg.Height = pkgHeight types.NumImport[pkgName]++ // TODO(mdempsky): This belongs somewhere else. 
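Retiring the package-height field without changing the export data layout works because the writer keeps emitting a placeholder (the w.uint64(0) above) and the reader consumes and discards it. A minimal standalone sketch of that pattern, using an invented layout that is not the real export format:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeMeta keeps emitting the retired field (as zero) so the stream
// layout does not change; readers consume and discard it.
func writeMeta(w *bytes.Buffer, name string) {
	binary.Write(w, binary.LittleEndian, uint32(len(name)))
	w.WriteString(name)
	binary.Write(w, binary.LittleEndian, uint64(0)) // was package height
}

func readMeta(r *bytes.Reader) string {
	var n uint32
	binary.Read(r, binary.LittleEndian, &n)
	name := make([]byte, n)
	r.Read(name)
	var discard uint64
	binary.Read(r, binary.LittleEndian, &discard) // was package height; ignored
	return string(name)
}

func main() {
	var buf bytes.Buffer
	writeMeta(&buf, "example")
	fmt.Println(readMeta(bytes.NewReader(buf.Bytes()))) // example
}

The field count and order stay fixed, so reader and writer only stop agreeing on what the value means, not on where it sits in the stream.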
@@ -187,9 +186,6 @@ func ReadImports(pkg *types.Pkg, data string) { if pkg.Name != pkgName { base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path) } - if pkg.Height != pkgHeight { - base.Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path) - } } for nSyms := ird.uint64(); nSyms > 0; nSyms-- { @@ -1493,7 +1489,9 @@ func (r *importReader) node() ir.Node { return n case ir.OCONV, ir.OCONVIFACE, ir.OCONVIDATA, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR: - return ir.NewConvExpr(r.pos(), op, r.typ(), r.expr()) + n := ir.NewConvExpr(r.pos(), op, r.typ(), r.expr()) + n.SetImplicit(r.bool()) + return n case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN, ir.OUNSAFEADD, ir.OUNSAFESLICE: pos := r.pos() diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index ffd00ec3a79..8295a4e5602 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -13,6 +13,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/types" + "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" ) @@ -119,6 +120,13 @@ func ComputeAddrtaken(top []ir.Node) { } } +// LinksymAddr returns a new expression that evaluates to the address +// of lsym. typ specifies the type of the addressed memory. +func LinksymAddr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *ir.AddrExpr { + n := ir.NewLinksymExpr(pos, lsym, typ) + return Expr(NodAddrAt(pos, n)).(*ir.AddrExpr) +} + func NodNil() ir.Node { n := ir.NewNilExpr(base.Pos) n.SetType(types.Types[types.TNIL]) @@ -293,24 +301,14 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { n = convlit1(n, t, false, context) if n.Type() == nil { - return n + base.Fatalf("cannot assign %v to %v", n, t) + } + if n.Type().IsUntyped() { + base.Fatalf("%L has untyped type", n) } if t.Kind() == types.TBLANK { return n } - - // Convert ideal bool from comparison to plain bool - // if the next step is non-bool (like interface{}). - if n.Type() == types.UntypedBool && !t.IsBoolean() { - if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL { - r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n) - r.SetType(types.Types[types.TBOOL]) - r.SetTypecheck(1) - r.SetImplicit(true) - n = r - } - } - if types.Identical(n.Type(), t) { return n } diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go index 4bf39a5e9df..9a214940172 100644 --- a/src/cmd/compile/internal/types/pkg.go +++ b/src/cmd/compile/internal/types/pkg.go @@ -16,9 +16,6 @@ import ( // pkgMap maps a package path to a package. var pkgMap = make(map[string]*Pkg) -// MaxPkgHeight is a height greater than any likely package height. -const MaxPkgHeight = 1e9 - type Pkg struct { Path string // string literal used in import statement, e.g. "runtime/internal/sys" Name string // package name, e.g. "sys" @@ -26,12 +23,6 @@ type Pkg struct { Syms map[string]*Sym Pathsym *obj.LSym - // Height is the package's height in the import graph. Leaf - // packages (i.e., packages with no imports) have height 0, - // and all other packages have height 1 plus the maximum - // height of their imported packages. 
- Height int - Direct bool // imported directly } diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index 927ebc453a6..9d8707befa5 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -97,14 +97,7 @@ func (sym *Sym) LinksymABI(abi obj.ABI) *obj.LSym { // Less reports whether symbol a is ordered before symbol b. // // Symbols are ordered exported before non-exported, then by name, and -// finally (for non-exported symbols) by package height and path. -// -// Ordering by package height is necessary to establish a consistent -// ordering for non-exported names with the same spelling but from -// different packages. We don't necessarily know the path for the -// package being compiled, but by definition it will have a height -// greater than any other packages seen within the compilation unit. -// For more background, see issue #24693. +// finally (for non-exported symbols) by package path. func (a *Sym) Less(b *Sym) bool { if a == b { return false @@ -131,9 +124,6 @@ func (a *Sym) Less(b *Sym) bool { return a.Name < b.Name } if !ea { - if a.Pkg.Height != b.Pkg.Height { - return a.Pkg.Height < b.Pkg.Height - } return a.Pkg.Path < b.Pkg.Path } return false diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go index df080f071c3..f3735618097 100644 --- a/src/cmd/compile/internal/types2/object.go +++ b/src/cmd/compile/internal/types2/object.go @@ -189,7 +189,7 @@ func (obj *object) sameId(pkg *Package, name string) bool { // // Objects are ordered nil before non-nil, exported before // non-exported, then by name, and finally (for non-exported -// functions) by package height and path. +// functions) by package path. func (a *object) less(b *object) bool { if a == b { return false @@ -215,9 +215,6 @@ func (a *object) less(b *object) bool { return a.name < b.name } if !ea { - if a.pkg.height != b.pkg.height { - return a.pkg.height < b.pkg.height - } return a.pkg.path < b.pkg.path } diff --git a/src/cmd/compile/internal/types2/package.go b/src/cmd/compile/internal/types2/package.go index 8044e7e6a76..26f10645d28 100644 --- a/src/cmd/compile/internal/types2/package.go +++ b/src/cmd/compile/internal/types2/package.go @@ -14,7 +14,6 @@ type Package struct { name string scope *Scope imports []*Package - height int complete bool fake bool // scope lookup errors are silently dropped if package is fake (internal use only) cgo bool // uses of this package will be rewritten into uses of declarations from _cgo_gotypes.go @@ -23,14 +22,8 @@ type Package struct { // NewPackage returns a new Package for the given package path and name. // The package is not complete and contains no explicit imports. func NewPackage(path, name string) *Package { - return NewPackageHeight(path, name, 0) -} - -// NewPackageHeight is like NewPackage, but allows specifying the -// package's height. -func NewPackageHeight(path, name string, height int) *Package { scope := NewScope(Universe, nopos, nopos, fmt.Sprintf("package %q", path)) - return &Package{path: path, name: name, scope: scope, height: height} + return &Package{path: path, name: name, scope: scope} } // Path returns the package path. @@ -39,9 +32,6 @@ func (pkg *Package) Path() string { return pkg.path } // Name returns the package name. func (pkg *Package) Name() string { return pkg.name } -// Height returns the package height. -func (pkg *Package) Height() int { return pkg.height } - // SetName sets the package name. 
func (pkg *Package) SetName(name string) { pkg.name = name } diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go index 5d498b6b2b1..77881f493f6 100644 --- a/src/cmd/compile/internal/types2/resolver.go +++ b/src/cmd/compile/internal/types2/resolver.go @@ -197,7 +197,6 @@ func (check *Checker) importPackage(pos syntax.Pos, path, dir string) *Package { // methods with receiver base type names. func (check *Checker) collectObjects() { pkg := check.pkg - pkg.height = 0 // pkgImports is the set of packages already imported by any package file seen // so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate @@ -255,15 +254,6 @@ func (check *Checker) collectObjects() { continue } - if imp == Unsafe { - // typecheck ignores imports of package unsafe for - // calculating height. - // TODO(mdempsky): Revisit this. This seems fine, but I - // don't remember explicitly considering this case. - } else if h := imp.height + 1; h > pkg.height { - pkg.height = h - } - // local name overrides imported package name name := imp.name if s.LocalPkgName != nil { diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go index 17876d1f3c2..af82b3fa7ad 100644 --- a/src/cmd/compile/internal/types2/sizeof_test.go +++ b/src/cmd/compile/internal/types2/sizeof_test.go @@ -47,7 +47,7 @@ func TestSizeof(t *testing.T) { // Misc {Scope{}, 60, 104}, - {Package{}, 40, 80}, + {Package{}, 36, 72}, {_TypeSet{}, 28, 56}, } diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index c44d934f21e..1d922d983e6 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -99,10 +99,11 @@ func walkAssign(init *ir.Nodes, n ir.Node) ir.Node { } as.Y = r if r.Op() == ir.OAPPEND { + r := r.(*ir.CallExpr) // Left in place for back end. // Do not add a new write barrier. // Set up address of type for back end. - r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem()) + r.X = reflectdata.AppendElemRType(base.Pos, r) return as } // Otherwise, lowered for race detector. 
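Switching these call sites from TypePtr(elemtype) to AppendElemRType matters mostly inside shape-stenciled generic functions, where the static element type is only a GC shape and the real descriptor is expected to arrive through the call's RType expression, presumably a dictionary lookup under unified IR. The sketch below shows the kind of source code that exercises that path; the comment is an interpretation of the CL, not something the diff states outright:

package main

import "fmt"

// appendOne is generic; under GC-shape stenciling all pointer-shaped T
// share one instantiation, so the element's *runtime._type cannot be
// baked into the append lowering here and instead arrives as an RType
// expression on the call, the case AppendElemRType prefers.
func appendOne[T any](s []T, v T) []T {
	return append(s, v)
}

func main() {
	a := appendOne([]*int(nil), new(int))
	b := appendOne([]*string(nil), new(string))
	fmt.Println(len(a), len(b)) // 1 1
}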
@@ -169,11 +170,11 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { var call *ir.CallExpr if w := t.Elem().Size(); w <= zeroValSize { fn := mapfn(mapaccess2[fast], t, false) - call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key) + call = mkcall1(fn, fn.Type().Results(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key) } else { fn := mapfn("mapaccess2_fat", t, true) z := reflectdata.ZeroAddr(w) - call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z) + call = mkcall1(fn, fn.Type().Results(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z) } // mapaccess2* returns a typed bool, but due to spec changes, @@ -502,7 +503,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))} + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.AppendElemRType(base.Pos, n), s, nn))} nodes.Append(nif) // s = s[:n] @@ -523,7 +524,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem()) ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes)) ptr2, len2 := backingArrayPtrLen(l2) - ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.AppendElemRType(base.Pos, n), ptr1, len1, ptr2, len2) } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime { // rely on runtime to instrument: // copy(s[len(l1):], l2) @@ -670,7 +671,7 @@ func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { fn = typecheck.SubstArgTypes(fn, elemtype, elemtype) // s = growslice(T, s, n) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))} + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.AppendElemRType(base.Pos, n), s, nn))} nodes = append(nodes, nif) // s = s[:n] diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index a11031b3d0b..7e84f28217f 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -87,7 +87,7 @@ func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { fn := typecheck.LookupRuntime("growslice") // growslice(, old []T, mincap int) (ret []T) fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem()) - nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns, + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.AppendElemRType(base.Pos, n), ns, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))} l = append(l, nif) @@ -141,7 +141,7 @@ func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { ptrL, lenL := backingArrayPtrLen(n.X) n.Y = cheapExpr(n.Y, init) ptrR, lenR := backingArrayPtrLen(n.Y) - return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR) + return mkcall1(fn, n.Type(), init, reflectdata.CopyElemRType(base.Pos, n), ptrL, lenL, ptrR, lenR) } if runtimecall { @@ -214,7 +214,7 @@ func walkDelete(init 
*ir.Nodes, n *ir.CallExpr) ir.Node { t := map_.Type() fast := mapfast(t) key = mapKeyArg(fast, n, key, false) - return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key) + return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.DeleteMapRType(base.Pos, n), map_, key) } // walkLenCap walks an OLEN or OCAP node. @@ -258,7 +258,7 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node { argtype = types.Types[types.TINT] } - return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype)) + return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.MakeChanRType(base.Pos, n), typecheck.Conv(size, argtype)) } // walkMakeMap walks an OMAKEMAP node. @@ -356,7 +356,7 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { fn := typecheck.LookupRuntime(fnname) fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem()) - return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h) + return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h) } // walkMakeSlice walks an OMAKESLICE node. @@ -421,7 +421,7 @@ func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node { argtype = types.Types[types.TINT] } fn := typecheck.LookupRuntime(fnname) - ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) + ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) ptr.MarkNonNil() len = typecheck.Conv(len, types.Types[types.TINT]) cap = typecheck.Conv(cap, types.Types[types.TINT]) @@ -475,7 +475,7 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // Replace make+copy with runtime.makeslicecopy. // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer fn := typecheck.LookupRuntime("makeslicecopy") - ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) + ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) ptr.MarkNonNil() sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length) return walkExpr(typecheck.Expr(sh), init) @@ -658,7 +658,7 @@ func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { if ir.ShouldCheckPtr(ir.CurFunc, 1) { fnname := "unsafeslicecheckptr" fn := typecheck.LookupRuntime(fnname) - init.Append(mkcall1(fn, nil, init, reflectdata.TypePtr(sliceType.Elem()), unsafePtr, typecheck.Conv(len, lenType))) + init.Append(mkcall1(fn, nil, init, reflectdata.UnsafeSliceElemRType(base.Pos, n), unsafePtr, typecheck.Conv(len, lenType))) } else { // Otherwise, open code unsafe.Slice to prevent runtime call overhead. // Keep this code in sync with runtime.unsafeslice{,64} diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go index 6a8ad56d756..df7cb731f7b 100644 --- a/src/cmd/compile/internal/walk/compare.go +++ b/src/cmd/compile/internal/walk/compare.go @@ -54,6 +54,10 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // Given mixed interface/concrete comparison, // rewrite into types-equal && data-equal. 
// This is efficient, avoids allocations, and avoids runtime calls. + // + // TODO(mdempsky): It would be more general and probably overall + // simpler to just extend walkCompareInterface to optimize when one + // operand is an OCONVIFACE. if n.X.Type().IsInterface() != n.Y.Type().IsInterface() { // Preserve side-effects in case of short-circuiting; see #32187. l := cheapExpr(n.X, init) @@ -74,9 +78,12 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { // l.tab == type(r) // For non-empty interface, this is: // l.tab != nil && l.tab._type == type(r) + // + // TODO(mdempsky): For non-empty interface comparisons, just + // compare against the itab address directly? var eqtype ir.Node tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l) - rtyp := reflectdata.TypePtr(r.Type()) + rtyp := reflectdata.CompareRType(base.Pos, n) if l.Type().IsEmptyInterface() { tab.SetType(types.NewPtr(types.Types[types.TUINT8])) tab.SetTypecheck(1) diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index 595fe8538ca..7dec9ae6d8b 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -414,9 +414,10 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { // make the map var - a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil) + args := []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(n.Len + int64(len(n.List)))} + a := typecheck.Expr(ir.NewCallExpr(base.Pos, ir.OMAKE, nil, args)).(*ir.MakeExpr) + a.RType = n.RType a.SetEsc(n.Esc()) - a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(n.Len + int64(len(n.List)))} appendWalkStmt(init, ir.NewAssignStmt(base.Pos, m, a)) entries := n.List @@ -467,14 +468,18 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { kidx := ir.NewIndexExpr(base.Pos, vstatk, i) kidx.SetBounded(true) - lhs := ir.NewIndexExpr(base.Pos, m, kidx) + + // typechecker rewrites OINDEX to OINDEXMAP + lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, kidx)).(*ir.IndexExpr) + base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs) + lhs.RType = n.RType zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0)) cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(tk.NumElem())) incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1))) var body ir.Node = ir.NewAssignStmt(base.Pos, lhs, rhs) - body = typecheck.Stmt(body) // typechecker rewrites OINDEX to OINDEXMAP + body = typecheck.Stmt(body) body = orderStmtInPlace(body, map[string][]*ir.Name{}) loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil) @@ -503,8 +508,14 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem)) ir.SetPos(tmpelem) - var a ir.Node = ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem) - a = typecheck.Stmt(a) // typechecker rewrites OINDEX to OINDEXMAP + + // typechecker rewrites OINDEX to OINDEXMAP + lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, tmpkey)).(*ir.IndexExpr) + base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs) + lhs.RType = n.RType + + var a ir.Node = ir.NewAssignStmt(base.Pos, lhs, tmpelem) + a = typecheck.Stmt(a) a = orderStmtInPlace(a, map[string][]*ir.Name{}) appendWalkStmt(init, a) } diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go index 72631e7dfb0..753dbc3e887 100644 --- 
a/src/cmd/compile/internal/walk/convert.go +++ b/src/cmd/compile/internal/walk/convert.go @@ -14,7 +14,6 @@ import ( "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" - "cmd/internal/src" "cmd/internal/sys" ) @@ -50,13 +49,8 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { } if !fromType.IsInterface() { - var typeWord ir.Node - if toType.IsEmptyInterface() { - typeWord = reflectdata.TypePtr(fromType) - } else { - typeWord = reflectdata.ITabAddr(fromType, toType) - } - l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, dataWord(n.Pos(), n.X, init, n.Esc() != ir.EscNone)) + typeWord := reflectdata.ConvIfaceTypeWord(base.Pos, n) + l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, dataWord(n, init)) l.SetType(toType) l.SetTypecheck(n.Typecheck()) return l @@ -95,7 +89,7 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { fn := typecheck.LookupRuntime("convI2I") types.CalcSize(fn.Type()) call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.Args = []ir.Node{reflectdata.TypePtr(toType), itab} + call.Args = []ir.Node{reflectdata.ConvIfaceTypeWord(base.Pos, n), itab} typeWord = walkExpr(typecheck.Expr(call), init) } @@ -107,10 +101,10 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { return e } -// Returns the data word (the second word) used to represent n in an interface. -// n must not be of interface type. -// esc describes whether the result escapes. -func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node { +// Returns the data word (the second word) used to represent conv.X in +// an interface. +func dataWord(conv *ir.ConvExpr, init *ir.Nodes) ir.Node { + pos, n := conv.Pos(), conv.X fromType := n.Type() // If it's a pointer, it is its own representation. @@ -150,7 +144,7 @@ func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node { case n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PEXTERN && n.(*ir.Name).Readonly(): // n is a readonly global; use it directly. value = n - case !escapes && fromType.Size() <= 1024: + case conv.Esc() == ir.EscNone && fromType.Size() <= 1024: // n does not escape. Use a stack temporary initialized to n. value = typecheck.Temp(fromType) init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n))) @@ -176,7 +170,7 @@ func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node { n = copyExpr(n, fromType, init) } fn = typecheck.SubstArgTypes(fn, fromType) - args = []ir.Node{reflectdata.TypePtr(fromType), typecheck.NodAddr(n)} + args = []ir.Node{reflectdata.ConvIfaceSrcRType(base.Pos, conv), typecheck.NodAddr(n)} } else { // Use a specialized conversion routine that takes the type being // converted by value, not by pointer. @@ -211,7 +205,7 @@ func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node { // walkConvIData walks an OCONVIDATA node. func walkConvIData(n *ir.ConvExpr, init *ir.Nodes) ir.Node { n.X = walkExpr(n.X, init) - return dataWord(n.Pos(), n.X, init, n.Esc() != ir.EscNone) + return dataWord(n, init) } // walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node. 
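ConvIfaceTypeWord and ConvIfaceSrcRType, used throughout convert.go above, describe the two halves of an interface value: the type word and the boxed data word. A small standalone example of the conversion flavors involved; the type names are invented, and the remaining I2I case is the one where convI2I receives the destination interface's descriptor, as the convI2I hunk above shows:

package main

import "fmt"

type stringer interface{ String() string }

type celsius float64

func (c celsius) String() string { return fmt.Sprintf("%.1fC", float64(c)) }

func main() {
	c := celsius(21.5)

	// T2E: the type word is celsius's *runtime._type. Because celsius is
	// not pointer-shaped, dataWord boxes the value: a stack temporary if
	// it does not escape, otherwise a heap copy whose descriptor
	// ConvIfaceSrcRType supplies.
	var e any = c

	// T2I: the type word is the (celsius, stringer) *runtime.itab.
	var s stringer = c

	fmt.Println(e, s) // 21.5C 21.5C
}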
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 803a07ae73e..83fcea38d5b 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -782,7 +782,7 @@ func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node { t := map_.Type() fast := mapfast(t) key := mapKeyArg(fast, n, n.Index, n.Assigned) - args := []ir.Node{reflectdata.TypePtr(t), map_, key} + args := []ir.Node{reflectdata.IndexMapRType(base.Pos, n), map_, key} var mapFn ir.Node switch { diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 2d1e88238cc..91a2f73cc62 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -993,7 +993,7 @@ func (o *orderState) stmt(n ir.Node) { do(0, recv.X.Type().Elem()) do(1, types.Types[types.TBOOL]) if len(init) != 0 { - ir.DumpList("ninit", r.Init()) + ir.DumpList("ninit", init) base.Fatalf("ninit on select recv") } orderBlock(ncas.PtrInit(), o.free) @@ -1456,8 +1456,12 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { // Emit eval+insert of dynamic entries, one at a time. for _, r := range dynamics { - as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value) - typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP + lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, r.Key)).(*ir.IndexExpr) + base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs) + lhs.RType = n.RType + + as := ir.NewAssignStmt(base.Pos, lhs, r.Value) + typecheck.Stmt(as) o.stmt(as) } diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 6c30fa28772..b697c243c7c 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -38,11 +38,7 @@ func cheapComputableIndex(width int64) bool { // the returned node. func walkRange(nrange *ir.RangeStmt) ir.Node { if isMapClear(nrange) { - m := nrange.X - lno := ir.SetPos(m) - n := mapClear(m) - base.Pos = lno - return n + return mapClear(nrange) } nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil) @@ -107,7 +103,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { // for v1 := range ha { body } if v2 == nil { - body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)} + body = []ir.Node{rangeAssign(nrange, hv1)} break } @@ -116,10 +112,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { // v1, v2 = hv1, ha[hv1] tmp := ir.NewIndexExpr(base.Pos, ha, hv1) tmp.SetBounded(true) - // Use OAS2 to correctly handle assignments - // of the form "v1, a[v1] := range". - a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1, tmp}) - body = []ir.Node{a} + body = []ir.Node{rangeAssign2(nrange, hv1, tmp)} break } @@ -144,9 +137,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { tmp.SetBounded(true) init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp))) - // Use OAS2 to correctly handle assignments - // of the form "v1, a[v1] := range". - a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)}) + a := rangeAssign2(nrange, hv1, ir.NewStarExpr(base.Pos, hp)) body = append(body, a) // Advance pointer as part of the late increment. 
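The rangeAssign and rangeAssign2 calls above (their definitions follow in the next hunk) exist because pre-declared range variables may require implicit conversions, typically to an interface type, and under unified IR those conversions must carry the precomputed TypeWord/SrcRType values. A plain illustration of a loop that takes rangeConvert's conversion path, not compiler code:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1}

	// k and v are interface-typed, so each iteration's assignments go
	// through implicit conversions to any; rangeConvert is what attaches
	// the precomputed TypeWord/SrcRType values to exactly such conversions.
	var k, v any
	for k, v = range m {
		fmt.Println(k, v) // a 1
	}
}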
@@ -172,7 +163,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { fn := typecheck.LookupRuntime("mapiterinit") fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th) - init = append(init, mkcallstmt1(fn, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit))) + init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit))) nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil()) fn = typecheck.LookupRuntime("mapiternext") @@ -183,11 +174,10 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { if v1 == nil { body = nil } else if v2 == nil { - body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)} + body = []ir.Node{rangeAssign(nrange, key)} } else { elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym)) - a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{key, elem}) - body = []ir.Node{a} + body = []ir.Node{rangeAssign2(nrange, key, elem)} } case types.TCHAN: @@ -210,7 +200,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { if v1 == nil { body = nil } else { - body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)} + body = []ir.Node{rangeAssign(nrange, hv1)} } // Zero hv1. This prevents hv1 from being the sole, inaccessible // reference to an otherwise GC-able value during the next channel receive. @@ -275,11 +265,10 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { if v1 != nil { if v2 != nil { // v1, v2 = hv1t, hv2 - a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1t, hv2}) - body = append(body, a) + body = append(body, rangeAssign2(nrange, hv1t, hv2)) } else { // v1 = hv1t - body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t)) + body = append(body, rangeAssign(nrange, hv1t)) } } } @@ -314,6 +303,36 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { return n } +// rangeAssign returns "n.Key = key". +func rangeAssign(n *ir.RangeStmt, key ir.Node) ir.Node { + key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType) + return ir.NewAssignStmt(n.Pos(), n.Key, key) +} + +// rangeAssign2 returns "n.Key, n.Value = key, value". +func rangeAssign2(n *ir.RangeStmt, key, value ir.Node) ir.Node { + // Use OAS2 to correctly handle assignments + // of the form "v1, a[v1] = range". + key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType) + value = rangeConvert(n, n.Value.Type(), value, n.ValueTypeWord, n.ValueSrcRType) + return ir.NewAssignListStmt(n.Pos(), ir.OAS2, []ir.Node{n.Key, n.Value}, []ir.Node{key, value}) +} + +// rangeConvert returns src, converted to dst if necessary. If a +// conversion is necessary, then typeWord and srcRType are copied to +// their respective ConvExpr fields. +func rangeConvert(nrange *ir.RangeStmt, dst *types.Type, src, typeWord, srcRType ir.Node) ir.Node { + src = typecheck.Expr(src) + if dst.Kind() == types.TBLANK || types.Identical(dst, src.Type()) { + return src + } + + n := ir.NewConvExpr(nrange.Pos(), ir.OCONV, dst, src) + n.TypeWord = typeWord + n.SrcRType = srcRType + return typecheck.Expr(n) +} + // isMapClear checks if n is of the form: // // for k := range m { @@ -360,13 +379,17 @@ func isMapClear(n *ir.RangeStmt) bool { } // mapClear constructs a call to runtime.mapclear for the map m. 
-func mapClear(m ir.Node) ir.Node { +func mapClear(nrange *ir.RangeStmt) ir.Node { + m := nrange.X + origPos := ir.SetPos(m) + defer func() { base.Pos = origPos }() + t := m.Type() // instantiate mapclear(typ *type, hmap map[any]any) fn := typecheck.LookupRuntime("mapclear") fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem()) - n := mkcallstmt1(fn, reflectdata.TypePtr(t), m) + n := mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), m) return walkStmt(typecheck.Stmt(n)) } diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index 6cac8f29377..257903c0b3c 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -85,8 +85,12 @@ func walkSwitchExpr(sw *ir.SwitchStmt) { defaultGoto = jmp } - for _, n1 := range ncase.List { - s.Add(ncase.Pos(), n1, jmp) + for i, n1 := range ncase.List { + var rtype ir.Node + if i < len(ncase.RTypes) { + rtype = ncase.RTypes[i] + } + s.Add(ncase.Pos(), n1, rtype, jmp) } // Process body. @@ -124,11 +128,12 @@ type exprSwitch struct { type exprClause struct { pos src.XPos lo, hi ir.Node + rtype ir.Node // *runtime._type for OEQ node jmp ir.Node } -func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) { - c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp} +func (s *exprSwitch) Add(pos src.XPos, expr, rtype, jmp ir.Node) { + c := exprClause{pos: pos, lo: expr, hi: expr, rtype: rtype, jmp: jmp} if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL { s.clauses = append(s.clauses, c) return @@ -233,7 +238,7 @@ func (s *exprSwitch) flush() { // Add length case to outer switch. cas := ir.NewBasicLit(pos, constant.MakeInt64(runLen(run))) jmp := ir.NewBranchStmt(pos, ir.OGOTO, label) - outer.Add(pos, cas, jmp) + outer.Add(pos, cas, nil, jmp) } s.done.Append(ir.NewLabelStmt(s.pos, outerLabel)) outer.Emit(&s.done) @@ -342,7 +347,9 @@ func (c *exprClause) test(exprname ir.Node) ir.Node { } } - return ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo) + n := ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo) + n.RType = c.rtype + return n } func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool { diff --git a/src/go/internal/gcimporter/gcimporter_test.go b/src/go/internal/gcimporter/gcimporter_test.go index b32de179103..68a077c190b 100644 --- a/src/go/internal/gcimporter/gcimporter_test.go +++ b/src/go/internal/gcimporter/gcimporter_test.go @@ -583,6 +583,30 @@ func TestIssue13566(t *testing.T) { } } +func TestTypeNamingOrder(t *testing.T) { + skipSpecialPlatforms(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). 
+ if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + testoutdir := filepath.Join(tmpdir, "testdata") + + compile(t, "testdata", "g.go", testoutdir) + + // import must succeed (test for issue at hand) + _ = importPkg(t, "./testdata/g", tmpdir) +} + func TestIssue13898(t *testing.T) { skipSpecialPlatforms(t) diff --git a/src/go/internal/gcimporter/testdata/g.go b/src/go/internal/gcimporter/testdata/g.go new file mode 100644 index 00000000000..301c1429e6c --- /dev/null +++ b/src/go/internal/gcimporter/testdata/g.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Input for TestTypeNamingOrder + +// ensures that the order in which "type A B" declarations are +// processed is correct; this was a problem for unified IR imports. + +package g + +type Client struct { + common service + A *AService + B *BService +} + +type service struct { + client *Client +} + +type AService service +type BService service diff --git a/src/go/internal/gcimporter/ureader.go b/src/go/internal/gcimporter/ureader.go index 3b14232c819..97f0664fe3c 100644 --- a/src/go/internal/gcimporter/ureader.go +++ b/src/go/internal/gcimporter/ureader.go @@ -31,6 +31,8 @@ type pkgReader struct { // laterFns holds functions that need to be invoked at the end of // import reading. laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int } // later adds a function to be invoked at the end of import reading. @@ -38,6 +40,15 @@ func (pr *pkgReader) later(fn func()) { pr.laterFns = append(pr.laterFns, fn) } +// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. +func (pr *pkgReader) laterFor(t types.Type, fn func()) { + if pr.laterFors == nil { + pr.laterFors = make(map[types.Type]int) + } + pr.laterFors[t] = len(pr.laterFns) + pr.laterFns = append(pr.laterFns, fn) +} + // readUnifiedPackage reads a package description from the given // unified IR export data decoder. func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { @@ -60,7 +71,7 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + r.Bool() // TODO(mdempsky): Remove; was "has init" for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, @@ -198,21 +209,49 @@ func (r *reader) doPkg() *types.Package { } name := r.String() - height := r.Len() - // Was: "pkg := types.NewPackageHeight(path, name, height)" - pkg, _ := types.NewPackage(path, name), height + pkg := types.NewPackage(path, name) r.p.imports[path] = pkg imports := make([]*types.Package, r.Len()) for i := range imports { imports[i] = r.pkg() } - pkg.SetImports(imports) + + // The documentation for (*types.Package).Imports requires + // flattening the import graph when reading from export data, as + // obviously incorrect as that is. + // + // TODO(mdempsky): Remove this if go.dev/issue/54096 is accepted. + pkg.SetImports(flattenImports(imports)) return pkg } +// flattenImports returns the transitive closure of all imported +// packages rooted from pkgs. 
+func flattenImports(pkgs []*types.Package) []*types.Package { + var res []*types.Package + + seen := make(map[*types.Package]bool) + var add func(pkg *types.Package) + add = func(pkg *types.Package) { + if seen[pkg] { + return + } + seen[pkg] = true + res = append(res, pkg) + for _, imp := range pkg.Imports() { + add(imp) + } + } + + for _, pkg := range pkgs { + add(pkg) + } + return res +} + // @@@ Types func (r *reader) typ() types.Type { @@ -459,7 +498,15 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // unit tests expected that), but cmd/compile doesn't care // about it, so maybe we can avoid worrying about that here. rhs := r.typ() - r.p.later(func() { + pk := r.p + pk.laterFor(named, func() { + // First be sure that the rhs is initialized, if it needs to be initialized. + delete(pk.laterFors, named) // prevent cycles + if i, ok := pk.laterFors[rhs]; ok { + f := pk.laterFns[i] + pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op + f() // initialize RHS + } underlying := rhs.Underlying() named.SetUnderlying(underlying) }) diff --git a/src/internal/pkgbits/decoder.go b/src/internal/pkgbits/decoder.go index 0b5fd9705c2..5e233b87705 100644 --- a/src/internal/pkgbits/decoder.go +++ b/src/internal/pkgbits/decoder.go @@ -18,6 +18,12 @@ import ( // A PkgDecoder provides methods for decoding a package's Unified IR // export data. type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + // pkgPath is the package path for the package to be decoded. // // TODO(mdempsky): Remove; unneeded since CL 391014. @@ -52,6 +58,9 @@ type PkgDecoder struct { // TODO(mdempsky): Remove; unneeded since CL 391014. func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. pkgPath is the package path for the // compilation unit that produced the export data. @@ -67,9 +76,18 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { r := strings.NewReader(input) - var version uint32 - assert(binary.Read(r, binary.LittleEndian, &version) == nil) - assert(version == 0) + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) @@ -215,7 +233,7 @@ func (r *Decoder) rawReloc(k RelocKind, idx int) Index { // // If EnableSync is false, then Sync is a no-op. func (r *Decoder) Sync(mWant SyncMarker) { - if !EnableSync { + if !r.common.sync { return } diff --git a/src/internal/pkgbits/encoder.go b/src/internal/pkgbits/encoder.go index 1326a135cf8..ec47e352cbe 100644 --- a/src/internal/pkgbits/encoder.go +++ b/src/internal/pkgbits/encoder.go @@ -14,6 +14,16 @@ import ( "runtime" ) +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +// +// TODO(mdempsky): For the next version bump: +// - remove the legacy "has init" bool from the public root +const currentVersion uint32 = 1 + // A PkgEncoder provides methods for encoding a package's Unified IR // export data. 
type PkgEncoder struct { @@ -25,15 +35,21 @@ type PkgEncoder struct { // elems[RelocString][stringsIdx[s]] == s (if present). stringsIdx map[string]Index + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. syncFrames int } +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + // NewPkgEncoder returns an initialized PkgEncoder. // // syncFrames is the number of caller frames that should be serialized // at Sync points. Serializing additional frames results in larger // export data files, but can help diagnosing desync errors in -// higher-level Unified IR reader/writer code. +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. func NewPkgEncoder(syncFrames int) PkgEncoder { return PkgEncoder{ stringsIdx: make(map[string]Index), @@ -51,7 +67,13 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { assert(binary.Write(out, binary.LittleEndian, x) == nil) } - writeUint32(0) // version + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) // Write elemEndsEnds. var sum uint32 @@ -204,7 +226,7 @@ func (w *Encoder) rawReloc(r RelocKind, idx Index) int { } func (w *Encoder) Sync(m SyncMarker) { - if !EnableSync { + if !w.p.SyncMarkers() { return } @@ -297,8 +319,14 @@ func (w *Encoder) Code(c Code) { // section (if not already present), and then writing a relocation // into the element bitstream. func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) + w.Reloc(RelocString, idx) } // Strings encodes and writes a variable-length slice of strings into diff --git a/src/internal/pkgbits/flags.go b/src/internal/pkgbits/flags.go new file mode 100644 index 00000000000..654222745fa --- /dev/null +++ b/src/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/src/internal/pkgbits/sync.go b/src/internal/pkgbits/sync.go index 72f776af05d..1520b73afb9 100644 --- a/src/internal/pkgbits/sync.go +++ b/src/internal/pkgbits/sync.go @@ -10,17 +10,6 @@ import ( "strings" ) -// EnableSync controls whether sync markers are written into unified -// IR's export data format and also whether they're expected when -// reading them back in. They're inessential to the correct -// functioning of unified IR, but are helpful during development to -// detect mistakes. -// -// When sync is enabled, writer stack frames will also be included in -// the export data. Currently, a fixed number of frames are included, -// controlled by -d=syncframes (default 0). -const EnableSync = true - // fmtFrames formats a backtrace for reporting reader/writer desyncs. 
func fmtFrames(pcs ...uintptr) []string { res := make([]string, 0, len(pcs)) @@ -109,6 +98,7 @@ const ( SyncExprs SyncExpr SyncExprType + SyncAssign SyncOp SyncFuncLit SyncCompLit @@ -139,4 +129,8 @@ const ( SyncStmtsEnd SyncLabel SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI ) diff --git a/src/internal/pkgbits/syncmarker_string.go b/src/internal/pkgbits/syncmarker_string.go index 39db9eddad2..4a5b0ca5f2f 100644 --- a/src/internal/pkgbits/syncmarker_string.go +++ b/src/internal/pkgbits/syncmarker_string.go @@ -45,39 +45,40 @@ func _() { _ = x[SyncExprs-35] _ = x[SyncExpr-36] _ = x[SyncExprType-37] - _ = x[SyncOp-38] - _ = x[SyncFuncLit-39] - _ = x[SyncCompLit-40] - _ = x[SyncDecl-41] - _ = x[SyncFuncBody-42] - _ = x[SyncOpenScope-43] - _ = x[SyncCloseScope-44] - _ = x[SyncCloseAnotherScope-45] - _ = x[SyncDeclNames-46] - _ = x[SyncDeclName-47] - _ = x[SyncStmts-48] - _ = x[SyncBlockStmt-49] - _ = x[SyncIfStmt-50] - _ = x[SyncForStmt-51] - _ = x[SyncSwitchStmt-52] - _ = x[SyncRangeStmt-53] - _ = x[SyncCaseClause-54] - _ = x[SyncCommClause-55] - _ = x[SyncSelectStmt-56] - _ = x[SyncDecls-57] - _ = x[SyncLabeledStmt-58] - _ = x[SyncUseObjLocal-59] - _ = x[SyncAddLocal-60] - _ = x[SyncLinkname-61] - _ = x[SyncStmt1-62] - _ = x[SyncStmtsEnd-63] - _ = x[SyncLabel-64] - _ = x[SyncOptLabel-65] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] } -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprAssertTypeOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 228, 230, 237, 244, 248, 256, 265, 275, 292, 301, 309, 314, 323, 329, 336, 346, 355, 365, 375, 385, 390, 401, 412, 420, 428, 433, 441, 446, 454} +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 
218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} func (i SyncMarker) String() string { i -= 1 diff --git a/test/escape_iface.go b/test/escape_iface.go index dba08e3cb33..986228129a6 100644 --- a/test/escape_iface.go +++ b/test/escape_iface.go @@ -234,16 +234,6 @@ func dotTypeEscape2() { // #13805, #15796 *(&v) = x.(int) *(&v), *(&ok) = y.(int) } - { - i := 0 - j := 0 - var ok bool - var x interface{} = i // ERROR "i does not escape" - var y interface{} = j // ERROR "j does not escape" - - sink = x.(int) // ERROR "x.\(int\) escapes to heap" - sink, *(&ok) = y.(int) - } { i := 0 // ERROR "moved to heap: i" j := 0 // ERROR "moved to heap: j" diff --git a/test/escape_iface_nounified.go b/test/escape_iface_nounified.go new file mode 100644 index 00000000000..1d267bcd185 --- /dev/null +++ b/test/escape_iface_nounified.go @@ -0,0 +1,25 @@ +// errorcheck -0 -m -l +//go:build !goexperiment.unified +// +build !goexperiment.unified + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +var sink interface{} + +func dotTypeEscape2() { // #13805, #15796 + { + i := 0 + j := 0 + var ok bool + var x interface{} = i // ERROR "i does not escape" + var y interface{} = j // ERROR "j does not escape" + + sink = x.(int) // ERROR "x.\(int\) escapes to heap" + // BAD: should be "y.\(int\) escapes to heap" too + sink, *(&ok) = y.(int) + } +} diff --git a/test/escape_iface_unified.go b/test/escape_iface_unified.go new file mode 100644 index 00000000000..80222dae5fe --- /dev/null +++ b/test/escape_iface_unified.go @@ -0,0 +1,24 @@ +// errorcheck -0 -m -l +//go:build goexperiment.unified +// +build goexperiment.unified + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +var sink interface{} + +func dotTypeEscape2() { // #13805, #15796 + { + i := 0 + j := 0 + var ok bool + var x interface{} = i // ERROR "i does not escape" + var y interface{} = j // ERROR "j does not escape" + + sink = x.(int) // ERROR "x.\(int\) escapes to heap" + sink, *(&ok) = y.(int) // ERROR "autotmp_.* escapes to heap" + } +} diff --git a/test/fixedbugs/issue27836.dir/Äfoo.go b/test/fixedbugs/issue27836.dir/Äfoo.go deleted file mode 100644 index 8b6a814c3c4..00000000000 --- a/test/fixedbugs/issue27836.dir/Äfoo.go +++ /dev/null @@ -1,13 +0,0 @@ -package Äfoo - -var ÄbarV int = 101 - -func Äbar(x int) int { - defer func() { ÄbarV += 3 }() - return Äblix(x) -} - -func Äblix(x int) int { - defer func() { ÄbarV += 9 }() - return ÄbarV + x -} diff --git a/test/fixedbugs/issue27836.dir/Ämain.go b/test/fixedbugs/issue27836.dir/Ämain.go deleted file mode 100644 index 25d2c71fc00..00000000000 --- a/test/fixedbugs/issue27836.dir/Ämain.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import ( - "fmt" - - "./Äfoo" - Äblix "./Äfoo" -) - -func main() { - fmt.Printf("Äfoo.Äbar(33) returns %v\n", Äfoo.Äbar(33)) - fmt.Printf("Äblix.Äbar(33) returns %v\n", Äblix.Äbar(33)) -} diff --git a/test/fixedbugs/issue27836.dir/Þfoo.go b/test/fixedbugs/issue27836.dir/Þfoo.go new file mode 100644 index 00000000000..ea6be0f49fd --- /dev/null +++ b/test/fixedbugs/issue27836.dir/Þfoo.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
diff --git a/test/escape_iface.go b/test/escape_iface.go
index dba08e3cb33..986228129a6 100644
--- a/test/escape_iface.go
+++ b/test/escape_iface.go
@@ -234,16 +234,6 @@ func dotTypeEscape2() { // #13805, #15796
 		*(&v) = x.(int)
 		*(&v), *(&ok) = y.(int)
 	}
-	{
-		i := 0
-		j := 0
-		var ok bool
-		var x interface{} = i // ERROR "i does not escape"
-		var y interface{} = j // ERROR "j does not escape"
-
-		sink = x.(int) // ERROR "x.\(int\) escapes to heap"
-		sink, *(&ok) = y.(int)
-	}
 	{
 		i := 0 // ERROR "moved to heap: i"
 		j := 0 // ERROR "moved to heap: j"
diff --git a/test/escape_iface_nounified.go b/test/escape_iface_nounified.go
new file mode 100644
index 00000000000..1d267bcd185
--- /dev/null
+++ b/test/escape_iface_nounified.go
@@ -0,0 +1,25 @@
+// errorcheck -0 -m -l
+//go:build !goexperiment.unified
+// +build !goexperiment.unified
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+var sink interface{}
+
+func dotTypeEscape2() { // #13805, #15796
+	{
+		i := 0
+		j := 0
+		var ok bool
+		var x interface{} = i // ERROR "i does not escape"
+		var y interface{} = j // ERROR "j does not escape"
+
+		sink = x.(int) // ERROR "x.\(int\) escapes to heap"
+		// BAD: should be "y.\(int\) escapes to heap" too
+		sink, *(&ok) = y.(int)
+	}
+}
diff --git a/test/escape_iface_unified.go b/test/escape_iface_unified.go
new file mode 100644
index 00000000000..80222dae5fe
--- /dev/null
+++ b/test/escape_iface_unified.go
@@ -0,0 +1,24 @@
+// errorcheck -0 -m -l
+//go:build goexperiment.unified
+// +build goexperiment.unified
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+var sink interface{}
+
+func dotTypeEscape2() { // #13805, #15796
+	{
+		i := 0
+		j := 0
+		var ok bool
+		var x interface{} = i // ERROR "i does not escape"
+		var y interface{} = j // ERROR "j does not escape"
+
+		sink = x.(int) // ERROR "x.\(int\) escapes to heap"
+		sink, *(&ok) = y.(int) // ERROR "autotmp_.* escapes to heap"
+	}
+}
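The new `autotmp_.* escapes to heap` diagnostic in escape_iface_unified.go comes from the temporaries that unified IR introduces for the two-result type assertion (the OAS2 rewrite described in inl.go). A rough source-level picture of that rewrite, with illustrative names rather than the compiler's real autotmp names:

```go
package main

var sink interface{}

func main() {
	var y interface{} = 0
	var ok bool

	// What `sink, *(&ok) = y.(int)` roughly becomes under unified IR
	// (tmp1/tmp2 stand in for autotmp_N):
	var tmp1 int
	var tmp2 bool
	tmp1, tmp2 = y.(int)
	// Assigning tmp1 to the interface-typed sink re-boxes it, which is
	// why escape analysis now reports the temporary as escaping.
	sink, *(&ok) = tmp1, tmp2

	_ = ok
}
```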
diff --git a/test/fixedbugs/issue27836.dir/Äfoo.go b/test/fixedbugs/issue27836.dir/Äfoo.go
deleted file mode 100644
index 8b6a814c3c4..00000000000
--- a/test/fixedbugs/issue27836.dir/Äfoo.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package Äfoo
-
-var ÄbarV int = 101
-
-func Äbar(x int) int {
-	defer func() { ÄbarV += 3 }()
-	return Äblix(x)
-}
-
-func Äblix(x int) int {
-	defer func() { ÄbarV += 9 }()
-	return ÄbarV + x
-}
diff --git a/test/fixedbugs/issue27836.dir/Ämain.go b/test/fixedbugs/issue27836.dir/Ämain.go
deleted file mode 100644
index 25d2c71fc00..00000000000
--- a/test/fixedbugs/issue27836.dir/Ämain.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	"./Äfoo"
-	Äblix "./Äfoo"
-)
-
-func main() {
-	fmt.Printf("Äfoo.Äbar(33) returns %v\n", Äfoo.Äbar(33))
-	fmt.Printf("Äblix.Äbar(33) returns %v\n", Äblix.Äbar(33))
-}
diff --git a/test/fixedbugs/issue27836.dir/Þfoo.go b/test/fixedbugs/issue27836.dir/Þfoo.go
new file mode 100644
index 00000000000..ea6be0f49fd
--- /dev/null
+++ b/test/fixedbugs/issue27836.dir/Þfoo.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package Þfoo
+
+var ÞbarV int = 101
+
+func Þbar(x int) int {
+	defer func() { ÞbarV += 3 }()
+	return Þblix(x)
+}
+
+func Þblix(x int) int {
+	defer func() { ÞbarV += 9 }()
+	return ÞbarV + x
+}
diff --git a/test/fixedbugs/issue27836.dir/Þmain.go b/test/fixedbugs/issue27836.dir/Þmain.go
new file mode 100644
index 00000000000..596c620d80a
--- /dev/null
+++ b/test/fixedbugs/issue27836.dir/Þmain.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+
+	"./Þfoo"
+	Þblix "./Þfoo"
+)
+
+func main() {
+	fmt.Printf("Þfoo.Þbar(33) returns %v\n", Þfoo.Þbar(33))
+	fmt.Printf("Þblix.Þbar(33) returns %v\n", Þblix.Þbar(33))
+}
diff --git a/test/fixedbugs/issue32187.go b/test/fixedbugs/issue32187.go
index 9c8c9c26d79..268da8112ff 100644
--- a/test/fixedbugs/issue32187.go
+++ b/test/fixedbugs/issue32187.go
@@ -36,7 +36,11 @@ func main() {
 		{"type assertion", "", func() { _ = x == x.(*int) }},
 		{"out of bounds", "", func() { _ = x == s[1] }},
 		{"nil pointer dereference #1", "", func() { _ = x == *p }},
-		{"nil pointer dereference #2", "nil pointer dereference", func() { _ = *l == r[0] }},
+		// TODO(mdempsky): Restore "nil pointer dereference" check. The Go
+		// spec doesn't mandate an order for panics (or even panic
+		// messages), but left-to-right is less confusing to users.
+		{"nil pointer dereference #2", "", func() { _ = *l == r[0] }},
+		{"nil pointer dereference #3", "", func() { _ = *l == any(r[0]) }},
 	}
 
 	for _, tc := range tests {
@@ -44,16 +48,14 @@ func main() {
 	}
 }
 
-func testFuncShouldPanic(name, errStr string, f func()) {
+func testFuncShouldPanic(name, want string, f func()) {
 	defer func() {
 		e := recover()
 		if e == nil {
 			log.Fatalf("%s: comparison did not panic\n", name)
 		}
-		if errStr != "" {
-			if !strings.Contains(e.(error).Error(), errStr) {
-				log.Fatalf("%s: wrong panic message\n", name)
-			}
+		if have := e.(error).Error(); !strings.Contains(have, want) {
+			log.Fatalf("%s: wrong panic message: have %q, want %q\n", name, have, want)
 		}
 	}()
 	f()
diff --git a/test/fixedbugs/issue42284.dir/b.go b/test/fixedbugs/issue42284.dir/b.go
index 652aa321226..8cd93b8db45 100644
--- a/test/fixedbugs/issue42284.dir/b.go
+++ b/test/fixedbugs/issue42284.dir/b.go
@@ -7,7 +7,7 @@ package b
 import "./a"
 
 func g() {
-	h := a.E() // ERROR "inlining call to a.E" "a.I\(a.T\(0\)\) does not escape"
+	h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape"
 	h.M() // ERROR "devirtualizing h.M to a.T"
 
 	// BAD: T(0) could be stack allocated.
diff --git a/test/fixedbugs/issue52128.dir/a.go b/test/fixedbugs/issue52128.dir/a.go
new file mode 100644
index 00000000000..0abf831c6fd
--- /dev/null
+++ b/test/fixedbugs/issue52128.dir/a.go
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type I interface{}
+
+type F func()
+
+type s struct {
+	f F
+}
+
+func NewWithF(f F) *s {
+	return &s{f: f}
+}
+
+func NewWithFuncI(func() I) *s {
+	return &s{}
+}
diff --git a/test/fixedbugs/issue52128.dir/b.go b/test/fixedbugs/issue52128.dir/b.go
new file mode 100644
index 00000000000..86f6ed7e056
--- /dev/null
+++ b/test/fixedbugs/issue52128.dir/b.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import (
+	"./a"
+)
+
+type S struct{}
+
+func (s *S) M1() a.I {
+	return a.NewWithF(s.M2)
+}
+
+func (s *S) M2() {}
diff --git a/test/fixedbugs/issue52128.dir/p.go b/test/fixedbugs/issue52128.dir/p.go
new file mode 100644
index 00000000000..d3f3dbbfb9b
--- /dev/null
+++ b/test/fixedbugs/issue52128.dir/p.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import (
+	"./a"
+	"./b"
+)
+
+func f() {
+	a.NewWithFuncI((&b.S{}).M1)
+}
diff --git a/test/fixedbugs/issue52128.go b/test/fixedbugs/issue52128.go
new file mode 100644
index 00000000000..8bb5c3e2139
--- /dev/null
+++ b/test/fixedbugs/issue52128.go
@@ -0,0 +1,7 @@
+// compiledir
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
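For reference, the pattern the issue52128 regression test exercises, collapsed into a single package (a sketch only; the original report presumably depends on the cross-package export/import that the .dir layout provides, so this one-file version just shows the shape: method values converted to a named func type and used as a `func() I` argument):

```go
package main

type I interface{}

type F func()

type s struct{ f F }

func NewWithF(f F) *s          { return &s{f: f} }
func NewWithFuncI(func() I) *s { return &s{} }

type S struct{}

// M1's method value has type func() I; M2's has type func(), which is
// implicitly converted to the named type F when passed to NewWithF.
func (x *S) M1() I { return NewWithF(x.M2) }
func (x *S) M2()   {}

func main() {
	NewWithFuncI((&S{}).M1) // method value used as a func() I argument
}
```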
diff --git a/test/fixedbugs/issue7921.go b/test/fixedbugs/issue7921.go
index 65be4b5bbee..f9efb7f55dd 100644
--- a/test/fixedbugs/issue7921.go
+++ b/test/fixedbugs/issue7921.go
@@ -41,7 +41,7 @@ func bufferNoEscape3(xs []string) string { // ERROR "xs does not escape$"
 
 func bufferNoEscape4() []byte {
 	var b bytes.Buffer
-	b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m\]$" "inlining call to bytes.\(\*Buffer\).Grow$" "string\(.*\) escapes to heap"
+	b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m\]$" "inlining call to bytes.\(\*Buffer\).Grow$" `".+" escapes to heap`
 	useBuffer(&b)
 	return b.Bytes() // ERROR "inlining call to bytes.\(\*Buffer\).Bytes$"
 }
diff --git a/test/inline.go b/test/inline.go
index 400898bceeb..04ba16858fa 100644
--- a/test/inline.go
+++ b/test/inline.go
@@ -107,18 +107,6 @@ func q(x int) int { // ERROR "can inline q"
 	return foo() // ERROR "inlining call to q.func1"
 }
 
-func r(z int) int {
-	foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape"
-		return x + z
-	}
-	bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2"
-		return x + func(y int) int { // ERROR "can inline r.func2.1" "can inline r.func3"
-			return 2*y + x*z
-		}(x) // ERROR "inlining call to r.func2.1"
-	}
-	return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "inlining call to r.func3"
-}
-
 func s0(x int) int { // ERROR "can inline s0"
 	foo := func() { // ERROR "can inline s0.func1" "func literal does not escape"
 		x = x + 1
diff --git a/test/inline_nounified.go b/test/inline_nounified.go
new file mode 100644
index 00000000000..7a9fc100716
--- /dev/null
+++ b/test/inline_nounified.go
@@ -0,0 +1,21 @@
+// errorcheckwithauto -0 -m -d=inlfuncswithclosures=1
+//go:build !goexperiment.unified
+// +build !goexperiment.unified
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package foo
+
+func r(z int) int {
+	foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape"
+		return x + z
+	}
+	bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2"
+		return x + func(y int) int { // ERROR "can inline r.func2.1" "can inline r.func3"
+			return 2*y + x*z
+		}(x) // ERROR "inlining call to r.func2.1"
+	}
+	return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "inlining call to r.func3"
+}
diff --git a/test/inline_unified.go b/test/inline_unified.go
new file mode 100644
index 00000000000..ff70e441510
--- /dev/null
+++ b/test/inline_unified.go
@@ -0,0 +1,21 @@
+// errorcheckwithauto -0 -m -d=inlfuncswithclosures=1
+//go:build goexperiment.unified
+// +build goexperiment.unified
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package foo
+
+func r(z int) int {
+	foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape"
+		return x + z
+	}
+	bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2"
+		return x + func(y int) int { // ERROR "can inline r.func2.1"
+			return 2*y + x*z
+		}(x) // ERROR "inlining call to r.func2.1"
+	}
+	return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "can inline r.func3" "inlining call to r.func3"
+}
diff --git a/test/live_regabi.go b/test/live_regabi.go
index aac9a7766c0..59be1863fc3 100644
--- a/test/live_regabi.go
+++ b/test/live_regabi.go
@@ -1,4 +1,5 @@
 // errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
+//go:build (amd64 && goexperiment.regabiargs) || (arm64 && goexperiment.regabiargs)
 // +build amd64,goexperiment.regabiargs arm64,goexperiment.regabiargs
 
 // Copyright 2014 The Go Authors. All rights reserved.
@@ -601,7 +602,7 @@ func f38(b bool) {
 		printnl()
 	case *fi38(2) = <-fc38(): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ string$"
 		printnl()
-	case *fi38(3), *fb38() = <-fc38(): // ERROR "stack object .autotmp_[0-9]+ string$" "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$"
+	case *fi38(3), *fb38() = <-fc38(): // ERROR "stack object .autotmp_[0-9]+ string$" "live at call to f[ibc]38:( .autotmp_[0-9]+)+$"
 		printnl()
 	}
 	printnl()
diff --git a/test/nilcheck.go b/test/nilcheck.go
index 6879438e9cd..e81db6dcb07 100644
--- a/test/nilcheck.go
+++ b/test/nilcheck.go
@@ -184,6 +184,7 @@ func f4(x *[10]int) {
 
 func f5(m map[string]struct{}) bool {
 	// Existence-only map lookups should not generate a nil check
-	_, ok := m[""]
+	tmp1, tmp2 := m[""] // ERROR "removed nil check"
+	_, ok := tmp1, tmp2
 	return ok
 }
diff --git a/test/run.go b/test/run.go
index b2902f190c3..4a992037615 100644
--- a/test/run.go
+++ b/test/run.go
@@ -1980,8 +1980,11 @@ var types2Failures32Bit = setOf(
 )
 
 var go118Failures = setOf(
-	"typeparam/nested.go",     // 1.18 compiler doesn't support function-local types with generics
-	"typeparam/issue51521.go", // 1.18 compiler produces bad panic message and link error
+	"typeparam/nested.go",      // 1.18 compiler doesn't support function-local types with generics
+	"typeparam/issue51521.go",  // 1.18 compiler produces bad panic message and link error
+	"typeparam/mdempsky/16.go", // 1.18 compiler uses interface shape type in failed type assertions
+	"typeparam/mdempsky/17.go", // 1.18 compiler mishandles implicit conversions from range loops
+	"typeparam/mdempsky/18.go", // 1.18 compiler mishandles implicit conversions in select statements
 )
 
 // In all of these cases, the 1.17 compiler reports reasonable errors, but either the
@@ -2009,18 +2012,10 @@ var _ = setOf(
 )
 
 var unifiedFailures = setOf(
-	"closure3.go",  // unified IR numbers closures differently than -d=inlfuncswithclosures
-	"escape4.go",   // unified IR can inline f5 and f6; test doesn't expect this
-	"inline.go",    // unified IR reports function literal diagnostics on different lines than -d=inlfuncswithclosures
-	"linkname3.go", // unified IR is missing some linkname errors
+	"closure3.go", // unified IR numbers closures differently than -d=inlfuncswithclosures
+	"escape4.go",  // unified IR can inline f5 and f6; test doesn't expect this
 
-	"fixedbugs/issue42284.go",  // prints "T(0) does not escape", but test expects "a.I(a.T(0)) does not escape"
-	"fixedbugs/issue7921.go",   // prints "… escapes to heap", but test expects "string(…) escapes to heap"
-	"typeparam/issue47631.go",  // unified IR can handle local type declarations
-	"fixedbugs/issue42058a.go", // unified IR doesn't report channel element too large
-	"fixedbugs/issue42058b.go", // unified IR doesn't report channel element too large
-	"fixedbugs/issue49767.go",  // unified IR doesn't report channel element too large
-	"fixedbugs/issue49814.go",  // unified IR doesn't report array type too large
+	"typeparam/issue47631.go", // unified IR can handle local type declarations
 )
 
 func setOf(keys ...string) map[string]bool {
diff --git a/test/switch.go b/test/switch.go
index 5e1d85bb687..1806fa7f9be 100644
--- a/test/switch.go
+++ b/test/switch.go
@@ -400,4 +400,18 @@ func main() {
 	case i > x:
 		os.Exit(1)
 	}
+
+	// Unified IR converts the tag and all case values to empty
+	// interface, when any of the case values aren't assignable to the
+	// tag value's type. Make sure that `case nil:` compares against the
+	// tag type's nil value (i.e., `(*int)(nil)`), not nil interface
+	// (i.e., `any(nil)`).
+	switch (*int)(nil) {
+	case nil:
+		// ok
+	case any(nil):
+		assert(false, "case any(nil) matched")
+	default:
+		assert(false, "default matched")
+	}
 }
diff --git a/test/typeparam/mdempsky/16.go b/test/typeparam/mdempsky/16.go
new file mode 100644
index 00000000000..f4f79b9aac5
--- /dev/null
+++ b/test/typeparam/mdempsky/16.go
@@ -0,0 +1,34 @@
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that type assertion panics mention the real interface type,
+// not their shape type.
+
+package main
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+func main() {
+	// The exact error message isn't important, but it should mention
+	// `main.T`, not `go.shape.int_0`.
+	if have := F[T](); !strings.Contains(have, "interface { T() main.T }") {
+		fmt.Printf("FAIL: unexpected panic message: %q\n", have)
+	}
+}
+
+type T int
+
+func F[T any]() (res string) {
+	defer func() {
+		res = recover().(runtime.Error).Error()
+	}()
+	_ = interface{ T() T }(nil).(T)
+	return
+}
diff --git a/test/typeparam/mdempsky/17.go b/test/typeparam/mdempsky/17.go
new file mode 100644
index 00000000000..12385c3f9ee
--- /dev/null
+++ b/test/typeparam/mdempsky/17.go
@@ -0,0 +1,110 @@
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that implicit conversions of derived types to interface type
+// in range loops work correctly.
+
+package main
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func main() {
+	test{"int", "V"}.match(RangeArrayAny[V]())
+	test{"int", "V"}.match(RangeArrayIface[V]())
+	test{"V"}.match(RangeChanAny[V]())
+	test{"V"}.match(RangeChanIface[V]())
+	test{"K", "V"}.match(RangeMapAny[K, V]())
+	test{"K", "V"}.match(RangeMapIface[K, V]())
+	test{"int", "V"}.match(RangeSliceAny[V]())
+	test{"int", "V"}.match(RangeSliceIface[V]())
+}
+
+type test []string
+
+func (t test) match(args ...any) {
+	if len(t) != len(args) {
+		fmt.Printf("FAIL: want %v values, have %v\n", len(t), len(args))
+		return
+	}
+	for i, want := range t {
+		if have := reflect.TypeOf(args[i]).Name(); want != have {
+			fmt.Printf("FAIL: %v: want type %v, have %v\n", i, want, have)
+		}
+	}
+}
+
+type iface interface{ M() int }
+
+type K int
+type V int
+
+func (K) M() int { return 0 }
+func (V) M() int { return 0 }
+
+func RangeArrayAny[V any]() (k, v any) {
+	for k, v = range [...]V{zero[V]()} {
+	}
+	return
+}
+
+func RangeArrayIface[V iface]() (k any, v iface) {
+	for k, v = range [...]V{zero[V]()} {
+	}
+	return
+}
+
+func RangeChanAny[V any]() (v any) {
+	for v = range chanOf(zero[V]()) {
+	}
+	return
+}
+
+func RangeChanIface[V iface]() (v iface) {
+	for v = range chanOf(zero[V]()) {
+	}
+	return
+}
+
+func RangeMapAny[K comparable, V any]() (k, v any) {
+	for k, v = range map[K]V{zero[K](): zero[V]()} {
+	}
+	return
+}
+
+func RangeMapIface[K interface {
+	iface
+	comparable
+}, V iface]() (k, v iface) {
+	for k, v = range map[K]V{zero[K](): zero[V]()} {
+	}
+	return
+}
+
+func RangeSliceAny[V any]() (k, v any) {
+	for k, v = range []V{zero[V]()} {
+	}
+	return
+}
+
+func RangeSliceIface[V iface]() (k any, v iface) {
+	for k, v = range []V{zero[V]()} {
+	}
+	return
+}
+
+func chanOf[T any](elems ...T) chan T {
+	c := make(chan T, len(elems))
+	for _, elem := range elems {
+		c <- elem
+	}
+	close(c)
+	return c
+}
+
+func zero[T any]() (_ T) { return }
diff --git a/test/typeparam/mdempsky/18.go b/test/typeparam/mdempsky/18.go
new file mode 100644
index 00000000000..f4a4ec73c5e
--- /dev/null
+++ b/test/typeparam/mdempsky/18.go
@@ -0,0 +1,26 @@
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that implicit conversions to interface type in a select/case
+// clause are compiled correctly.
+
+package main
+
+import "fmt"
+
+func main() { f[int]() }
+
+func f[T any]() {
+	ch := make(chan T)
+	close(ch)
+
+	var i, ok any
+	select {
+	case i, ok = <-ch:
+	}
+
+	fmt.Printf("%T %T\n", i, ok)
+}
diff --git a/test/typeparam/mdempsky/18.out b/test/typeparam/mdempsky/18.out
new file mode 100644
index 00000000000..19f1c39a22d
--- /dev/null
+++ b/test/typeparam/mdempsky/18.out
@@ -0,0 +1 @@
+int bool
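The implicit conversions that typeparam/mdempsky/18.go exercises can be pictured at the source level roughly as follows (a sketch with illustrative temporary names, not the compiler's actual lowering):

```go
package main

import "fmt"

func main() { f[int]() }

func f[T any]() {
	ch := make(chan T)
	close(ch)

	var i, ok any
	select {
	case tmp, tmpOk := <-ch:
		// Both the received T value and the comma-ok bool must be
		// converted to `any` before landing in i and ok.
		i, ok = any(tmp), any(tmpOk)
	}

	fmt.Printf("%T %T\n", i, ok) // prints "int bool" for T = int, matching 18.out
}
```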