
all: REVERSE MERGE dev.unified (d558507) into master

This commit is a REVERSE MERGE.
It merges dev.unified back into its parent branch, master.
This marks the end of development on dev.unified.

Merge List:

+ 2022-08-04 d558507db4 [dev.unified] all: merge master (85d87b9) into dev.unified
+ 2022-08-03 c9f2150cfb [dev.unified] cmd/compile: start using runtime dictionaries
+ 2022-07-30 994ff78ba0 [dev.unified] go/internal: set underlying types in proper order
+ 2022-07-28 23554d4744 [dev.unified] all: merge master (462b78f) into dev.unified
+ 2022-07-28 c8d5ccf82e [dev.unified] go/internal/gcimporter: flatten imports
+ 2022-07-28 ac0844ec27 [dev.unified] cmd/compile: move "has init" to private metadata
+ 2022-07-28 f995946094 [dev.unified] cmd/compile: implement simple inline body pruning heuristic
+ 2022-07-28 f2851c67fd [dev.unified] cmd/compile: allow inlining to fail gracefully
+ 2022-07-28 831fdf1dff [dev.unified] cmd/compile: extract nil handling from exprType
+ 2022-07-28 92798176e7 [dev.unified] cmd/compile: write iface conversion RTTI into unified IR
+ 2022-07-28 9b70178d58 [dev.unified] cmd/compile: write RTTI into unified IR export data
+ 2022-07-25 fc72b7705d [dev.unified] cmd/compile: add method expressions to dictionaries
+ 2022-07-25 f48fa643f1 [dev.unified] cmd/compile: remove obsolete RTTI wiring
+ 2022-07-22 131f981df0 [dev.unified] cmd/compile: make Unified IR always writes concrete type for const exprs
+ 2022-07-20 ae43bdc3e3 Merge "[dev.unified] all: merge master (8e1e64c) into dev.unified" into dev.unified
+ 2022-07-19 7a8ba83b72 [dev.unified] cmd/compile/internal/reflectdata: remove hasRType's `required` param
+ 2022-07-19 64cd6faa13 [dev.unified] cmd/compile/internal/noder: simplify mixed tag/case RTTI wiring
+ 2022-07-19 a4c5198a3c [dev.unified] cmd/compile/internal/noder: better switch statements
+ 2022-07-19 318027044a [dev.unified] cmd/compile/internal/noder: explicit nil handling
+ 2022-07-19 e971b6a9be [dev.unified] test: add switch test case for tricky nil handling
+ 2022-07-19 878439cfe5 [dev.unified] cmd/compile/internal/noder: preserve RTTI for select statements
+ 2022-07-19 e376746e54 [dev.unified] cmd/compile/internal/noder: wire RTTI for implicit conversions
+ 2022-07-19 c846fd8e13 [dev.unified] cmd/compile/internal/noder: implicit conversions for binary exprs
+ 2022-07-19 ebd34e3e45 [dev.unified] test: relax panic message expectations
+ 2022-07-19 76a82f09d6 [dev.unified] cmd/compile/internal/noder: prefer *At functions
+ 2022-07-19 de649a2a98 [dev.unified] all: merge master (8e1e64c) into dev.unified
+ 2022-07-19 055a5e55fa [dev.unified] test: change Unicode file/package name to use characters not translated by macOS.
+ 2022-07-18 2cf632cd57 [dev.unified] cmd/compile/internal/reflectdata: prefer ITabAddrAt in ConvIfaceTypeWord
+ 2022-07-12 9371a65584 internal/pkgbits: change EnableSync into a dynamic knob
+ 2022-07-01 d667be8831 [dev.unified] cmd/compile/internal/walk: RType fields for range assignments
+ 2022-06-30 1b838e9556 [dev.unified] all: merge master (993c387) into dev.unified
+ 2022-06-30 0a503cf43a [dev.unified] cmd/compile: refactor `range` desugaring
+ 2022-06-30 3635b07d16 [dev.unified] cmd/compile/internal/noder: implicit conversions for multi-valued expressions
+ 2022-06-30 e7219cc093 [dev.unified] cmd/compile/internal/noder: refactor N:1 expression handling
+ 2022-06-30 2f3ef73e18 [dev.unified] test: tweak nilcheck test
+ 2022-06-30 95d7ce9ab1 [dev.unified] test: break escape_iface.go into unified/nounified variants
+ 2022-06-30 f751319a0b [dev.unified] test: relax live_regabi.go
+ 2022-06-30 e3cdc981c8 [dev.unified] cmd/compile/internal/walk: fix typo in debug print
+ 2022-06-29 2280d897d6 [dev.unified] test: add regress test for generic select statements
+ 2022-06-27 4b78ece3d7 [dev.unified] cmd/compile: drop package height from Unified IR importer
+ 2022-06-27 398d46d538 [dev.unified] cmd/compile/internal/types2: remove package height
+ 2022-06-24 e7100adbca [dev.unified] all: merge master (5a1c5b8) into dev.unified
+ 2022-06-23 09a838ad86 [dev.unified] cmd/compile: rename haveRType and implicitExpr
+ 2022-06-23 421e9e9db2 [dev.unified] cmd/compile: implicit conversions for return statements
+ 2022-06-23 a3fea7796a [dev.unified] cmd/compile/internal/noder: implicit conversions for writer.assignStmt
+ 2022-06-23 82a958a661 [dev.unified] cmd/compile/internal/noder: refactor stmtAssign generation
+ 2022-06-23 711dacd8cf [dev.unified] cmd/compile/internal/noder: implicit conversion of call arguments
+ 2022-06-23 46b01ec667 [dev.unified] cmd/compile/internal/noder: remove needType logic
+ 2022-06-23 a3e474f867 [dev.unified] cmd/compile/internal/noder: implicit conversions for complits
+ 2022-06-23 5f5422a2dd [dev.unified] cmd/compile/internal/noder: start writing implicit conversions
+ 2022-06-23 9cb784ac69 [dev.unified] cmd/compile/internal/noder: add pkgWriter.typeOf helper
+ 2022-06-23 c70e93ff3d [dev.unified] cmd/compile/internal/typecheck: replace unreachable code with assert
+ 2022-06-23 20e1d5ac8c [dev.unified] cmd/compile: special case f(g()) calls in Unified IR
+ 2022-06-23 61ae2b734c [dev.unified] cmd/compile: plumb rtype through OSWITCH/OCASE clauses
+ 2022-06-23 3d432b6c4b [dev.unified] cmd/compile: plumb rtype through for OMAPLIT
+ 2022-06-23 7368647ac6 [dev.unified] cmd/compile: start setting RType fields for Unified IR
+ 2022-06-23 5960f4ec10 [dev.unified] cmd/compile: add RType fields
+ 2022-06-21 5e0258c700 [dev.unified] cmd/compile: avoid reflectType in ssagen
+ 2022-06-21 93833cd5d8 [dev.unified] cmd/compile: extract rtype code from walk
+ 2022-06-21 f70775ff22 [dev.unified] cmd/compile: refactor reflectdata.{TypePtr,ITabAddr}
+ 2022-06-21 fc5dad6646 [dev.unified] cmd/compile/internal/walk: minor prep refactoring
+ 2022-06-16 1f4e8afafe [dev.unified] all: merge master (635b124) into dev.unified
+ 2022-06-15 8a9485c023 [dev.unified] test: extract different inline test between unified and non-unified
+ 2022-06-14 394ea70cc9 [dev.unified] cmd/compile: more Unified IR docs and review
+ 2022-06-10 f73ad3d24d [dev.unified] test: add regress tests for #53276 and #53328
+ 2022-06-09 8ef8b60e18 [dev.unified] cmd/compile/internal/noder: stop handling type expressions as expressions
+ 2022-06-09 1a6c96bb9b [dev.unified] test: relax issue7921.go diagnostic message
+ 2022-06-09 c50c6bbc03 [dev.unified] cmd/compile: set base.Pos when process assignDef in Unified IR
+ 2022-06-09 d6df08693c [dev.unified] cmd/compile: fix unified IR don't report type size too large error
+ 2022-06-08 e7ef58542c [dev.unified] cmd/compile: restore Unified IR linkname pragma diagnostic
+ 2022-06-07 9e5c968021 [dev.unified] cmd/compile: visit LHS before RHS/X in assign/for statement
+ 2022-06-06 46ddf0873e [dev.unified] cmd/compile: export/import implicit attribute for conversion exprs
+ 2022-06-06 a8780f94c3 [dev.unified] cmd/compile: fix missing method value wrapper in unified IR
+ 2022-06-06 3a1f1e1575 [dev.unified] cmd/compile: remove package height
+ 2022-06-06 df7cb59de4 [dev.unified] cmd/compile: only sort symbols by name and package path
+ 2022-06-06 b39ac80871 [dev.unified] cmd/compile/internal/noder: push exprBlank up into assignment handling
+ 2022-06-06 55fc07e164 [dev.unified] cmd/compile/internal/noder: add optExpr for optional expressions
+ 2022-06-06 6c33f1d52e [dev.unified] cmd/compile/internal/noder: rename exprName to exprGlobal
+ 2022-06-06 4d28fcabb4 [dev.unified] all: update codereview.cfg for dev.unified branch

Change-Id: I604d057735e8a365621c91c206f9e46eabb4679b
Commit a10afb15e0 by Matthew Dempsky, 2022-08-04 10:32:51 -07:00
72 changed files with 2709 additions and 674 deletions

View File

@ -163,6 +163,7 @@ func ParseFlags() {
if buildcfg.Experiment.Unified {
Debug.Unified = 1
}
Debug.SyncFrames = -1 // disable sync markers by default
Debug.Checkptr = -1 // so we can tell whether it is set explicitly

View File

@ -75,11 +75,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
types.LocalPkg = types.NewPkg(base.Ctxt.Pkgpath, "")
// We won't know localpkg's height until after import
// processing. In the mean time, set to MaxPkgHeight to ensure
// height comparisons at least work until then.
types.LocalPkg.Height = types.MaxPkgHeight
// pseudo-package, for scoping
types.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
types.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin

View File

@ -139,22 +139,19 @@ func ImportData(imports map[string]*types2.Package, data, path string) (pkg *typ
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
pkgHeight := int(r.uint64())
_ = int(r.uint64()) // was package height, but not necessary anymore.
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
pkg = types2.NewPackageHeight(pkgPath, pkgName, pkgHeight)
pkg = types2.NewPackage(pkgPath, pkgName)
imports[pkgPath] = pkg
} else {
if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
if pkg.Height() != pkgHeight {
errorf("conflicting heights %v and %v for package %q", pkg.Height(), pkgHeight, path)
}
}
p.pkgCache[pkgPathOff] = pkg

View File

@ -39,7 +39,7 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
r.Bool() // has init
r.Bool() // TODO(mdempsky): Remove; was "has init"
for i, n := 0, r.Len(); i < n; i++ {
// As if r.obj(), but avoiding the Scope.Lookup call,
@ -162,9 +162,7 @@ func (r *reader) doPkg() *types2.Package {
}
name := r.String()
height := r.Len()
pkg := types2.NewPackageHeight(path, name, height)
pkg := types2.NewPackage(path, name)
r.p.imports[path] = pkg
// TODO(mdempsky): The list of imported packages is important for

View File

@ -430,6 +430,36 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
case ir.OMETHEXPR:
v.budget++ // Hack for toolstash -cmp.
case ir.OAS2:
n := n.(*ir.AssignListStmt)
// Unified IR unconditionally rewrites:
//
// a, b = f()
//
// into:
//
// DCL tmp1
// DCL tmp2
// tmp1, tmp2 = f()
// a, b = tmp1, tmp2
//
// so that it can insert implicit conversions as necessary. To
// minimize impact to the existing inlining heuristics (in
// particular, to avoid breaking the existing inlinability regress
// tests), we need to compensate for this here.
if base.Debug.Unified != 0 {
if init := n.Rhs[0].Init(); len(init) == 1 {
if _, ok := init[0].(*ir.AssignListStmt); ok {
// 4 for each value, because each temporary variable now
// appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
//
// 1 for the extra "tmp1, tmp2 = f()" assignment statement.
v.budget += 4*int32(len(n.Lhs)) + 1
}
}
}
}
v.budget--
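
At the source level, the rewrite this comment compensates for corresponds to the following minimal sketch (ordinary Go rather than compiler IR; the temporary names are illustrative):

package main

import "fmt"

func f() (int, string) { return 1, "two" }

func main() {
    var a, b any

    // Source form:
    //
    //    a, b = f()
    //
    // Unified IR routes the multi-value assignment through explicit
    // temporaries so that per-value implicit conversions (here
    // int->any and string->any) can be inserted:
    var tmp1 int     // DCL tmp1
    var tmp2 string  // DCL tmp2
    tmp1, tmp2 = f() // tmp1, tmp2 = f()
    a, b = tmp1, tmp2

    fmt.Println(a, b)
}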
@ -655,9 +685,8 @@ var inlgen int
var SSADumpInline = func(*ir.Func) {}
// NewInline allows the inliner implementation to be overridden.
// If it returns nil, the legacy inliner will handle this call
// instead.
var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { return nil }
// If it returns nil, the function will not be inlined.
var NewInline = oldInline
// If n is a OCALLFUNC node, and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
@ -777,7 +806,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
res := NewInline(n, fn, inlIndex)
if res == nil {
res = oldInline(n, fn, inlIndex)
return n
}
// transitive inlining

View File

@ -119,8 +119,9 @@ func (n *BasicLit) SetVal(val constant.Value) { n.val = val }
// or Op(X, Y) for builtin functions that do not become calls.
type BinaryExpr struct {
miniExpr
X Node
Y Node
X Node
Y Node
RType Node `mknode:"-"` // see reflectdata/helpers.go
}
func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr {
@ -148,6 +149,7 @@ type CallExpr struct {
origNode
X Node
Args Nodes
RType Node `mknode:"-"` // see reflectdata/helpers.go
KeepAlive []*Name // vars to be kept alive until call returns
IsDDD bool
NoInline bool
@ -192,6 +194,7 @@ type CompLitExpr struct {
miniExpr
origNode
List Nodes // initialized values
RType Node `mknode:"-"` // *runtime._type for OMAPLIT map types
Prealloc *Name
// For OSLICELIT, Len is the backing array length.
// For OMAPLIT, Len is the number of entries that we've removed from List and
@ -246,6 +249,27 @@ func (n *ConstExpr) Val() constant.Value { return n.val }
type ConvExpr struct {
miniExpr
X Node
// For implementing OCONVIFACE expressions.
//
// TypeWord is an expression yielding a *runtime._type or
// *runtime.itab value to go in the type word of the iface/eface
// result. See reflectdata.ConvIfaceTypeWord for further details.
//
// SrcRType is an expression yielding a *runtime._type value for X,
// if it's not pointer-shaped and needs to be heap allocated.
TypeWord Node `mknode:"-"`
SrcRType Node `mknode:"-"`
// For -d=checkptr instrumentation of conversions from
// unsafe.Pointer to *Elem or *[Len]Elem.
//
// TODO(mdempsky): We only ever need one of these, but currently we
// don't decide which one until walk. Longer term, it probably makes
// sense to have a dedicated IR op for `(*[Len]Elem)(ptr)[:n:m]`
// expressions.
ElemRType Node `mknode:"-"`
ElemElemRType Node `mknode:"-"`
}
func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
@ -275,6 +299,7 @@ type IndexExpr struct {
miniExpr
X Node
Index Node
RType Node `mknode:"-"` // see reflectdata/helpers.go
Assigned bool
}
@ -385,8 +410,9 @@ func (n *LogicalExpr) SetOp(op Op) {
// but *not* OMAKE (that's a pre-typechecking CallExpr).
type MakeExpr struct {
miniExpr
Len Node
Cap Node
RType Node `mknode:"-"` // see reflectdata/helpers.go
Len Node
Cap Node
}
func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr {
@ -623,7 +649,7 @@ type TypeAssertExpr struct {
// Runtime type information provided by walkDotType for
// assertions from non-empty interface to concrete type.
ITab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
ITab Node `mknode:"-"` // *runtime.itab for Type implementing X's type
}
func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr {
@ -650,6 +676,11 @@ type DynamicTypeAssertExpr struct {
miniExpr
X Node
// SrcRType is an expression that yields a *runtime._type value
// representing X's type. It's used in failed assertion panic
// messages.
SrcRType Node
// RType is an expression that yields a *runtime._type value
// representing the asserted type.
//
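
The RType fields added above are where, inside generic functions compiled with runtime dictionaries, the frontend records a dictionary-loaded *runtime._type for expressions that need one. A hedged, source-level sketch of such expressions (ordinary Go; the field wiring itself is internal to the compiler):

package main

import "fmt"

// Map indexing, append, and interface conversions inside a generic
// function need a type descriptor for the instantiated type; with
// unified IR it arrives via the RType/TypeWord fields rather than
// being derived from a concrete type.
func index[K comparable, V any](m map[K]V, k K) (V, bool) {
    v, ok := m[k] // OINDEXMAP: RType describes map[K]V
    return v, ok
}

func grow[T any](s []T, x T) []T {
    return append(s, x) // OAPPEND: RType describes the element type of []T
}

func box[T any](x T) any {
    return x // OCONVIFACE: TypeWord/SrcRType describe T
}

func main() {
    v, ok := index(map[string]int{"a": 1}, "a")
    fmt.Println(v, ok, grow([]int{1}, 2), box("hi"))
}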

View File

@ -170,6 +170,17 @@ type CaseClause struct {
miniStmt
Var *Name // declared variable for this case in type switch
List Nodes // list of expressions for switch, early select
// RTypes is a list of RType expressions, which are copied to the
// corresponding OEQ nodes that are emitted when switch statements
// are desugared. RTypes[i] must be non-nil if the emitted
// comparison for List[i] will be a mixed interface/concrete
// comparison; see reflectdata.CompareRType for details.
//
// Because mixed interface/concrete switch cases are rare, we allow
// len(RTypes) < len(List). Missing entries are implicitly nil.
RTypes Nodes
Body Nodes
}
@ -333,11 +344,20 @@ type RangeStmt struct {
Label *types.Sym
Def bool
X Node
RType Node `mknode:"-"` // see reflectdata/helpers.go
Key Node
Value Node
Body Nodes
HasBreak bool
Prealloc *Name
// When desugaring the RangeStmt during walk, the assignments to Key
// and Value may require OCONVIFACE operations. If so, these fields
// will be copied to their respective ConvExpr fields.
KeyTypeWord Node `mknode:"-"`
KeySrcRType Node `mknode:"-"`
ValueTypeWord Node `mknode:"-"`
ValueSrcRType Node `mknode:"-"`
}
func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt {
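
A small source-level sketch (ordinary Go) of the constructs these new fields describe; the descriptor plumbing itself is internal to the compiler:

package main

import "fmt"

// A switch whose tag is an interface value but whose cases are
// concrete-typed constants desugars into mixed interface/concrete OEQ
// comparisons; those comparisons receive their descriptors from
// CaseClause.RTypes.
func classify(x any) string {
    switch x {
    case 0:
        return "zero int"
    case "":
        return "empty string"
    default:
        return "other"
    }
}

// A range statement whose key and value variables have interface type
// needs an OCONVIFACE on each per-iteration assignment; the
// KeyTypeWord/KeySrcRType and ValueTypeWord/ValueSrcRType fields are
// copied onto those conversions during walk.
func dump(m map[string]int) {
    var k, v any
    for k, v = range m {
        fmt.Println(k, v)
    }
}

func main() {
    fmt.Println(classify(0), classify(""), classify(1.5))
    dump(map[string]int{"a": 1})
}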

View File

@ -1,5 +1,3 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -8,6 +6,7 @@ package noder
import "internal/pkgbits"
// A codeStmt distinguishes among statement encodings.
type codeStmt int
func (c codeStmt) Marker() pkgbits.SyncMarker { return pkgbits.SyncStmt1 }
@ -31,6 +30,7 @@ const (
stmtSelect
)
// A codeExpr distinguishes among expression encodings.
type codeExpr int
func (c codeExpr) Marker() pkgbits.SyncMarker { return pkgbits.SyncExpr }
@ -38,12 +38,9 @@ func (c codeExpr) Value() int { return int(c) }
// TODO(mdempsky): Split expr into addr, for lvalues.
const (
exprNone codeExpr = iota
exprConst
exprType // type expression
exprLocal // local variable
exprName // global variable or function
exprBlank
exprConst codeExpr = iota
exprLocal // local variable
exprGlobal // global variable or function
exprCompLit
exprFuncLit
exprSelector
@ -54,8 +51,23 @@ const (
exprBinaryOp
exprCall
exprConvert
exprNew
exprMake
exprNil
)
type codeAssign int
func (c codeAssign) Marker() pkgbits.SyncMarker { return pkgbits.SyncAssign }
func (c codeAssign) Value() int { return int(c) }
const (
assignBlank codeAssign = iota
assignDef
assignExpr
)
// A codeDecl distinguishes among declaration encodings.
type codeDecl int
func (c codeDecl) Marker() pkgbits.SyncMarker { return pkgbits.SyncDecl }

View File

@ -6,7 +6,6 @@ package noder
import (
"fmt"
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@ -53,31 +52,9 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node {
base.Assert(g.exprStmtOK)
// The gc backend expects all expressions to have a concrete type, and
// types2 mostly satisfies this expectation already. But there are a few
// cases where the Go spec doesn't require converting to concrete type,
// and so types2 leaves them untyped. So we need to fix those up here.
typ := tv.Type
if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 {
switch basic.Kind() {
case types2.UntypedNil:
// ok; can appear in type switch case clauses
// TODO(mdempsky): Handle as part of type switches instead?
case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex:
// Untyped rhs of non-constant shift, e.g. x << 1.0.
// If we have a constant value, it must be an int >= 0.
if tv.Value != nil {
s := constant.ToInt(tv.Value)
assert(s.Kind() == constant.Int && constant.Sign(s) >= 0)
}
typ = types2.Typ[types2.Uint]
case types2.UntypedBool:
typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition
case types2.UntypedString:
typ = types2.Typ[types2.String] // argument to "append" or "copy" calls
default:
base.FatalfAt(g.pos(expr), "unexpected untyped type: %v", basic)
}
typ := idealType(tv)
if typ == nil {
base.FatalfAt(g.pos(expr), "unexpected untyped type: %v", tv.Type)
}
// Constant expression.

View File

@ -11,6 +11,7 @@ import (
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
"cmd/internal/src"
)
@ -39,10 +40,6 @@ func typed(typ *types.Type, n ir.Node) ir.Node {
// Values
func Const(pos src.XPos, typ *types.Type, val constant.Value) ir.Node {
return typed(typ, ir.NewBasicLit(pos, val))
}
func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node {
orig := ir.NewRawOrigExpr(pos, op, raw)
return ir.NewConstExpr(val, typed(typ, orig))
@ -224,3 +221,33 @@ func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt {
}
return ir.NewAssignOpStmt(pos, op, x, bl)
}
func idealType(tv types2.TypeAndValue) types2.Type {
// The gc backend expects all expressions to have a concrete type, and
// types2 mostly satisfies this expectation already. But there are a few
// cases where the Go spec doesn't require converting to concrete type,
// and so types2 leaves them untyped. So we need to fix those up here.
typ := tv.Type
if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 {
switch basic.Kind() {
case types2.UntypedNil:
// ok; can appear in type switch case clauses
// TODO(mdempsky): Handle as part of type switches instead?
case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex:
// Untyped rhs of non-constant shift, e.g. x << 1.0.
// If we have a constant value, it must be an int >= 0.
if tv.Value != nil {
s := constant.ToInt(tv.Value)
assert(s.Kind() == constant.Int && constant.Sign(s) >= 0)
}
typ = types2.Typ[types2.Uint]
case types2.UntypedBool:
typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition
case types2.UntypedString:
typ = types2.Typ[types2.String] // argument to "append" or "copy" calls
default:
return nil
}
}
return typ
}
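
Hedged source-level examples (ordinary Go) of the untyped operands that idealType must assign a default concrete type to:

package main

// Each operand below is left untyped by types2 even though the gc
// backend wants a concrete type; idealType picks the defaults shown.
func untypedOperands(x int, b []byte) {
    _ = x << 1.0 // untyped RHS of a non-constant shift -> uint

    if 1 < 2 { // untyped bool condition in "if"/"for" -> bool
        _ = append(b, "hi"...) // untyped string argument to append -> string
        _ = copy(b, "ok")      // ...and to copy
    }
}

func main() {
    untypedOperands(1, make([]byte, 4))
}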

View File

@ -241,7 +241,7 @@ func readImportFile(path string, target *ir.Package, env *types2.Context, packag
pr := pkgbits.NewPkgDecoder(pkg1.Path, data)
// Read package descriptors for both types2 and compiler backend.
readPackage(newPkgReader(pr), pkg1)
readPackage(newPkgReader(pr), pkg1, false)
pkg2 = importer.ReadPackage(env, packages, pr)
case 'i':

View File

@ -219,7 +219,6 @@ type typeDelayInfo struct {
func (g *irgen) generate(noders []*noder) {
types.LocalPkg.Name = g.self.Name()
types.LocalPkg.Height = g.self.Height()
typecheck.TypecheckAllowed = true
// Prevent size calculations until we set the underlying type

View File

@ -1,5 +1,3 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -34,13 +32,20 @@ import (
// low-level linking details can be moved there, but the logic for
// handling extension data needs to stay in the compiler.
// A linker combines a package's stub export data with any referenced
// elements from imported packages into a single, self-contained
// export data file.
type linker struct {
pw pkgbits.PkgEncoder
pkgs map[string]pkgbits.Index
decls map[*types.Sym]pkgbits.Index
pkgs map[string]pkgbits.Index
decls map[*types.Sym]pkgbits.Index
bodies map[*types.Sym]pkgbits.Index
}
// relocAll ensures that all elements specified by pr and relocs are
// copied into the output export data file, and returns the
// corresponding indices in the output.
func (l *linker) relocAll(pr *pkgReader, relocs []pkgbits.RelocEnt) []pkgbits.RelocEnt {
res := make([]pkgbits.RelocEnt, len(relocs))
for i, rent := range relocs {
@ -50,6 +55,8 @@ func (l *linker) relocAll(pr *pkgReader, relocs []pkgbits.RelocEnt) []pkgbits.Re
return res
}
// relocIdx ensures a single element is copied into the output export
// data file, and returns the corresponding index in the output.
func (l *linker) relocIdx(pr *pkgReader, k pkgbits.RelocKind, idx pkgbits.Index) pkgbits.Index {
assert(pr != nil)
@ -85,10 +92,19 @@ func (l *linker) relocIdx(pr *pkgReader, k pkgbits.RelocKind, idx pkgbits.Index)
return newidx
}
// relocString copies the specified string from pr into the output
// export data file, deduplicating it against other strings.
func (l *linker) relocString(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
return l.pw.StringIdx(pr.StringIdx(idx))
}
// relocPkg copies the specified package from pr into the output
// export data file, rewriting its import path to match how it was
// imported.
//
// TODO(mdempsky): Since CL 391014, we already have the compilation
// unit's import path, so there should be no need to rewrite packages
// anymore.
func (l *linker) relocPkg(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
path := pr.PeekPkgPath(idx)
@ -114,6 +130,9 @@ func (l *linker) relocPkg(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
return w.Flush()
}
// relocObj copies the specified object from pr into the output export
// data file, rewriting its compiler-private extension data (e.g.,
// adding inlining cost and escape analysis results for functions).
func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
path, name, tag := pr.PeekObj(idx)
sym := types.NewPkg(path, "").Lookup(name)
@ -152,21 +171,12 @@ func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
l.relocCommon(pr, &wname, pkgbits.RelocName, idx)
l.relocCommon(pr, &wdict, pkgbits.RelocObjDict, idx)
var obj *ir.Name
if sym.Pkg == types.LocalPkg {
var ok bool
obj, ok = sym.Def.(*ir.Name)
// Generic types and functions won't have definitions, and imported
// objects may not either.
obj, _ := sym.Def.(*ir.Name)
local := sym.Pkg == types.LocalPkg
// Generic types and functions and declared constraint types won't
// have definitions.
// For now, just generically copy their extension data.
// TODO(mdempsky): Restore assertion.
if !ok && false {
base.Fatalf("missing definition for %v", sym)
}
}
if obj != nil {
if local && obj != nil {
wext.Sync(pkgbits.SyncObject1)
switch tag {
case pkgbits.ObjFunc:
@ -181,9 +191,66 @@ func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
l.relocCommon(pr, &wext, pkgbits.RelocObjExt, idx)
}
// Check if we need to export the inline bodies for functions and
// methods.
if obj != nil {
if obj.Op() == ir.ONAME && obj.Class == ir.PFUNC {
l.exportBody(obj, local)
}
if obj.Op() == ir.OTYPE {
if typ := obj.Type(); !typ.IsInterface() {
for _, method := range typ.Methods().Slice() {
l.exportBody(method.Nname.(*ir.Name), local)
}
}
}
}
return w.Idx
}
// exportBody exports the given function or method's body, if
// appropriate. local indicates whether it's a local function or
// method available on a locally declared type. (Due to cross-package
// type aliases, a method may be imported, but still available on a
// locally declared type.)
func (l *linker) exportBody(obj *ir.Name, local bool) {
assert(obj.Op() == ir.ONAME && obj.Class == ir.PFUNC)
fn := obj.Func
if fn.Inl == nil {
return // not inlinable anyway
}
// As a simple heuristic, if the function was declared in this
// package or we inlined it somewhere in this package, then we'll
// (re)export the function body. This isn't perfect, but seems
// reasonable in practice. In particular, it has the nice property
// that in the worst case, adding a blank import ensures the
// function body is available for inlining.
//
// TODO(mdempsky): Reimplement the reachable method crawling logic
// from typecheck/crawler.go.
exportBody := local || fn.Inl.Body != nil
if !exportBody {
return
}
sym := obj.Sym()
if _, ok := l.bodies[sym]; ok {
// Due to type aliases, we might visit methods multiple times.
base.AssertfAt(obj.Type().Recv() != nil, obj.Pos(), "expected method: %v", obj)
return
}
pri, ok := bodyReaderFor(fn)
assert(ok)
l.bodies[sym] = l.relocIdx(pri.pr, pkgbits.RelocBody, pri.idx)
}
// relocCommon copies the specified element from pr into w,
// recursively relocating any referenced elements as well.
func (l *linker) relocCommon(pr *pkgReader, w *pkgbits.Encoder, k pkgbits.RelocKind, idx pkgbits.Index) {
r := pr.NewDecoderRaw(k, idx)
w.Relocs = l.relocAll(pr, r.Relocs)
@ -220,10 +287,6 @@ func (l *linker) relocFuncExt(w *pkgbits.Encoder, name *ir.Name) {
if inl := name.Func.Inl; w.Bool(inl != nil) {
w.Len(int(inl.Cost))
w.Bool(inl.CanDelayResults)
pri, ok := bodyReader[name.Func]
assert(ok)
w.Reloc(pkgbits.RelocBody, l.relocIdx(pri.pr, pkgbits.RelocBody, pri.idx))
}
w.Sync(pkgbits.SyncEOF)
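
A standalone sketch of the export-body decision described in the exportBody comment above, using hypothetical stand-in types rather than the compiler's *ir.Name and *ir.Func:

package main

import "fmt"

// funcInfo is a hypothetical stand-in for the state exportBody
// inspects on an *ir.Name.
type funcInfo struct {
    inlinable      bool // fn.Inl != nil
    local          bool // declared in the package being compiled
    inlinedLocally bool // its inline body was used somewhere in this package
}

// shouldExportBody mirrors the heuristic: skip functions that are not
// inlinable, then (re)export the body if the function is local or was
// inlined locally.
func shouldExportBody(f funcInfo) bool {
    if !f.inlinable {
        return false // not inlinable anyway
    }
    return f.local || f.inlinedLocally
}

func main() {
    fmt.Println(shouldExportBody(funcInfo{inlinable: true, local: true}))          // true
    fmt.Println(shouldExportBody(funcInfo{inlinable: true, inlinedLocally: true})) // true
    fmt.Println(shouldExportBody(funcInfo{inlinable: true}))                       // false
}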

View File

@ -1,5 +1,3 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -12,12 +10,12 @@ import (
"cmd/compile/internal/syntax"
)
// This file defines helper functions useful for satisfying toolstash
// -cmp when compared against the legacy frontend behavior, but can be
// removed after that's no longer a concern.
// typeExprEndPos returns the position that noder would leave base.Pos
// after parsing the given type expression.
//
// Deprecated: This function exists to emulate position semantics from
// Go 1.17, necessary for compatibility with the backend DWARF
// generation logic that assigns variables to their appropriate scope.
func typeExprEndPos(expr0 syntax.Expr) syntax.Pos {
for {
switch expr := expr0.(type) {

(File diff suppressed because it is too large.)

View File

@ -1,5 +1,3 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -82,13 +80,12 @@ func unified(noders []*noder) {
base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version)
types.ParseLangFlag()
types.LocalPkg.Height = 0 // reset so pkgReader.pkgIdx doesn't complain
target := typecheck.Target
typecheck.TypecheckAllowed = true
localPkgReader = newPkgReader(pkgbits.NewPkgDecoder(types.LocalPkg.Path, data))
readPackage(localPkgReader, types.LocalPkg)
readPackage(localPkgReader, types.LocalPkg, true)
r := localPkgReader.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)
r.pkgInit(types.LocalPkg, target)
@ -164,7 +161,7 @@ func writePkgStub(noders []*noder) string {
{
w := publicRootWriter
w.pkg(pkg)
w.Bool(false) // has init; XXX
w.Bool(false) // TODO(mdempsky): Remove; was "has init"
scope := pkg.Scope()
names := scope.Names()
@ -227,42 +224,76 @@ func freePackage(pkg *types2.Package) {
base.Fatalf("package never finalized")
}
func readPackage(pr *pkgReader, importpkg *types.Pkg) {
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
// readPackage reads package export data from pr to populate
// importpkg.
//
// localStub indicates whether pr is reading the stub export data for
// the local package, as opposed to relocated export data for an
// import.
func readPackage(pr *pkgReader, importpkg *types.Pkg, localStub bool) {
{
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
base.Assertf(pkg == importpkg, "have package %q (%p), want package %q (%p)", pkg.Path, pkg, importpkg.Path, importpkg)
pkg := r.pkg()
base.Assertf(pkg == importpkg, "have package %q (%p), want package %q (%p)", pkg.Path, pkg, importpkg.Path, importpkg)
if r.Bool() {
sym := pkg.Lookup(".inittask")
task := ir.NewNameAt(src.NoXPos, sym)
task.Class = ir.PEXTERN
sym.Def = task
r.Bool() // TODO(mdempsky): Remove; was "has init"
for i, n := 0, r.Len(); i < n; i++ {
r.Sync(pkgbits.SyncObject)
assert(!r.Bool())
idx := r.Reloc(pkgbits.RelocObj)
assert(r.Len() == 0)
path, name, code := r.p.PeekObj(idx)
if code != pkgbits.ObjStub {
objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil, nil}
}
}
r.Sync(pkgbits.SyncEOF)
}
for i, n := 0, r.Len(); i < n; i++ {
r.Sync(pkgbits.SyncObject)
assert(!r.Bool())
idx := r.Reloc(pkgbits.RelocObj)
assert(r.Len() == 0)
if !localStub {
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)
path, name, code := r.p.PeekObj(idx)
if code != pkgbits.ObjStub {
objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil}
if r.Bool() {
sym := importpkg.Lookup(".inittask")
task := ir.NewNameAt(src.NoXPos, sym)
task.Class = ir.PEXTERN
sym.Def = task
}
for i, n := 0, r.Len(); i < n; i++ {
path := r.String()
name := r.String()
idx := r.Reloc(pkgbits.RelocBody)
sym := types.NewPkg(path, "").Lookup(name)
if _, ok := importBodyReader[sym]; !ok {
importBodyReader[sym] = pkgReaderIndex{pr, idx, nil, nil}
}
}
r.Sync(pkgbits.SyncEOF)
}
}
// writeUnifiedExport writes to `out` the finalized, self-contained
// Unified IR export data file for the current compilation unit.
func writeUnifiedExport(out io.Writer) {
l := linker{
pw: pkgbits.NewPkgEncoder(base.Debug.SyncFrames),
pkgs: make(map[string]pkgbits.Index),
decls: make(map[*types.Sym]pkgbits.Index),
pkgs: make(map[string]pkgbits.Index),
decls: make(map[*types.Sym]pkgbits.Index),
bodies: make(map[*types.Sym]pkgbits.Index),
}
publicRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic)
privateRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPrivate)
assert(publicRootWriter.Idx == pkgbits.PublicRootIdx)
assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx)
var selfPkgIdx pkgbits.Index
@ -273,7 +304,7 @@ func writeUnifiedExport(out io.Writer) {
r.Sync(pkgbits.SyncPkg)
selfPkgIdx = l.relocIdx(pr, pkgbits.RelocPkg, r.Reloc(pkgbits.RelocPkg))
r.Bool() // has init
r.Bool() // TODO(mdempsky): Remove; was "has init"
for i, n := 0, r.Len(); i < n; i++ {
r.Sync(pkgbits.SyncObject)
@ -304,8 +335,7 @@ func writeUnifiedExport(out io.Writer) {
w.Sync(pkgbits.SyncPkg)
w.Reloc(pkgbits.RelocPkg, selfPkgIdx)
w.Bool(typecheck.Lookup(".inittask").Def != nil)
w.Bool(false) // TODO(mdempsky): Remove; was "has init"
w.Len(len(idxs))
for _, idx := range idxs {
@ -319,5 +349,31 @@ func writeUnifiedExport(out io.Writer) {
w.Flush()
}
{
type symIdx struct {
sym *types.Sym
idx pkgbits.Index
}
var bodies []symIdx
for sym, idx := range l.bodies {
bodies = append(bodies, symIdx{sym, idx})
}
sort.Slice(bodies, func(i, j int) bool { return bodies[i].idx < bodies[j].idx })
w := privateRootWriter
w.Bool(typecheck.Lookup(".inittask").Def != nil)
w.Len(len(bodies))
for _, body := range bodies {
w.String(body.sym.Pkg.Path)
w.String(body.sym.Name)
w.Reloc(pkgbits.RelocBody, body.idx)
}
w.Sync(pkgbits.SyncEOF)
w.Flush()
}
base.Ctxt.Fingerprint = l.pw.DumpTo(out)
}
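
A standalone sketch, again with stand-in types, of how the private root section above records exported inline bodies: gather (symbol, index) pairs, sort by index for a deterministic order, then emit path/name/index triples:

package main

import (
    "fmt"
    "sort"
)

// symIdx is a stand-in for the (sym, idx) pairs collected in l.bodies.
type symIdx struct {
    pkgPath, name string
    idx           int
}

// sortedBodies returns the bodies in the order writeUnifiedExport
// emits them: ascending by relocation index.
func sortedBodies(bodies map[string]symIdx) []symIdx {
    out := make([]symIdx, 0, len(bodies))
    for _, b := range bodies {
        out = append(out, b)
    }
    sort.Slice(out, func(i, j int) bool { return out[i].idx < out[j].idx })
    return out
}

func main() {
    bodies := map[string]symIdx{
        "p.G": {pkgPath: "example.com/p", name: "G", idx: 7},
        "p.F": {pkgPath: "example.com/p", name: "F", idx: 3},
    }
    for _, b := range sortedBodies(bodies) {
        // Corresponds to w.String(path); w.String(name); w.Reloc(RelocBody, idx).
        fmt.Println(b.pkgPath, b.name, b.idx)
    }
}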

(File diff suppressed because it is too large.)

View File

@ -0,0 +1,226 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package reflectdata
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
)
func hasRType(n, rtype ir.Node, fieldName string) bool {
if rtype != nil {
return true
}
// We make an exception for `init`, because we still depend on
// pkginit for sorting package initialization statements, and it
// gets confused by implicit conversions. Also, package-scope
// statements can never be generic, so they'll never require
// dictionary lookups.
if base.Debug.Unified != 0 && ir.CurFunc.Nname.Sym().Name != "init" {
ir.Dump("CurFunc", ir.CurFunc)
base.FatalfAt(n.Pos(), "missing %s in %v: %+v", fieldName, ir.CurFunc, n)
}
return false
}
// assertOp asserts that n is an op.
func assertOp(n ir.Node, op ir.Op) {
base.AssertfAt(n.Op() == op, n.Pos(), "want %v, have %v", op, n)
}
// assertOp2 asserts that n is an op1 or op2.
func assertOp2(n ir.Node, op1, op2 ir.Op) {
base.AssertfAt(n.Op() == op1 || n.Op() == op2, n.Pos(), "want %v or %v, have %v", op1, op2, n)
}
// kindRType asserts that typ has the given kind, and returns an
// expression that yields the *runtime._type value representing typ.
func kindRType(pos src.XPos, typ *types.Type, k types.Kind) ir.Node {
base.AssertfAt(typ.Kind() == k, pos, "want %v type, have %v", k, typ)
return TypePtrAt(pos, typ)
}
// mapRType asserts that typ is a map type, and returns an expression
// that yields the *runtime._type value representing typ.
func mapRType(pos src.XPos, typ *types.Type) ir.Node {
return kindRType(pos, typ, types.TMAP)
}
// chanRType asserts that typ is a channel type, and returns an expression
// that yields the *runtime._type value representing typ.
func chanRType(pos src.XPos, typ *types.Type) ir.Node {
return kindRType(pos, typ, types.TCHAN)
}
// sliceElemRType asserts that typ is a slice type, and returns an
// expression that yields the *runtime._type value representing typ's
// element type.
func sliceElemRType(pos src.XPos, typ *types.Type) ir.Node {
base.AssertfAt(typ.IsSlice(), pos, "want slice type, have %v", typ)
return TypePtrAt(pos, typ.Elem())
}
// concreteRType asserts that typ is not an interface type, and
// returns an expression that yields the *runtime._type value
// representing typ.
func concreteRType(pos src.XPos, typ *types.Type) ir.Node {
base.AssertfAt(!typ.IsInterface(), pos, "want non-interface type, have %v", typ)
return TypePtrAt(pos, typ)
}
// AppendElemRType asserts that n is an "append" operation, and
// returns an expression that yields the *runtime._type value
// representing the result slice type's element type.
func AppendElemRType(pos src.XPos, n *ir.CallExpr) ir.Node {
assertOp(n, ir.OAPPEND)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return sliceElemRType(pos, n.Type())
}
// CompareRType asserts that n is a comparison (== or !=) operation
// between expressions of interface and non-interface type, and
// returns an expression that yields the *runtime._type value
// representing the non-interface type.
func CompareRType(pos src.XPos, n *ir.BinaryExpr) ir.Node {
assertOp2(n, ir.OEQ, ir.ONE)
base.AssertfAt(n.X.Type().IsInterface() != n.Y.Type().IsInterface(), n.Pos(), "expect mixed interface and non-interface, have %L and %L", n.X, n.Y)
if hasRType(n, n.RType, "RType") {
return n.RType
}
typ := n.X.Type()
if typ.IsInterface() {
typ = n.Y.Type()
}
return concreteRType(pos, typ)
}
// ConvIfaceTypeWord asserts that n is a conversion to an interface type,
// and returns an expression that yields the *runtime._type or
// *runtime.itab value necessary for implementing the conversion.
//
// - *runtime._type for the destination type, for I2I conversions
// - *runtime.itab, for T2I conversions
// - *runtime._type for the source type, for T2E conversions
func ConvIfaceTypeWord(pos src.XPos, n *ir.ConvExpr) ir.Node {
assertOp(n, ir.OCONVIFACE)
src, dst := n.X.Type(), n.Type()
base.AssertfAt(dst.IsInterface(), n.Pos(), "want interface type, have %L", n)
if hasRType(n, n.TypeWord, "TypeWord") {
return n.TypeWord
}
if dst.IsEmptyInterface() {
return concreteRType(pos, src) // direct eface construction
}
if !src.IsInterface() {
return ITabAddrAt(pos, src, dst) // direct iface construction
}
return TypePtrAt(pos, dst) // convI2I
}
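
The three bullets above correspond to ordinary Go conversions like the following (a hedged source-level illustration; the descriptor selection itself happens in ConvIfaceTypeWord):

package main

import "io"

type nopReadCloser struct{}

func (nopReadCloser) Read(p []byte) (int, error) { return 0, io.EOF }
func (nopReadCloser) Close() error               { return nil }

func convert(rc io.ReadCloser, v nopReadCloser) {
    var e any = v         // T2E: type word is the *runtime._type for nopReadCloser
    var r io.Reader = v   // T2I: type word is the *runtime.itab for (nopReadCloser, io.Reader)
    var r2 io.Reader = rc // I2I: type word is the *runtime._type for io.Reader (convI2I)
    _, _, _ = e, r, r2
}

func main() {
    convert(nopReadCloser{}, nopReadCloser{})
}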
// ConvIfaceSrcRType asserts that n is a conversion from
// non-interface type to interface type (or OCONVIDATA operation), and
// returns an expression that yields the *runtime._type for copying
// the convertee value to the heap.
func ConvIfaceSrcRType(pos src.XPos, n *ir.ConvExpr) ir.Node {
assertOp2(n, ir.OCONVIFACE, ir.OCONVIDATA)
if hasRType(n, n.SrcRType, "SrcRType") {
return n.SrcRType
}
return concreteRType(pos, n.X.Type())
}
// CopyElemRType asserts that n is a "copy" operation, and returns an
// expression that yields the *runtime._type value representing the
// destination slice type's element type.
func CopyElemRType(pos src.XPos, n *ir.BinaryExpr) ir.Node {
assertOp(n, ir.OCOPY)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return sliceElemRType(pos, n.X.Type())
}
// DeleteMapRType asserts that n is a "delete" operation, and returns
// an expression that yields the *runtime._type value representing the
// map type.
func DeleteMapRType(pos src.XPos, n *ir.CallExpr) ir.Node {
assertOp(n, ir.ODELETE)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return mapRType(pos, n.Args[0].Type())
}
// IndexMapRType asserts that n is a map index operation, and returns
// an expression that yields the *runtime._type value representing the
// map type.
func IndexMapRType(pos src.XPos, n *ir.IndexExpr) ir.Node {
assertOp(n, ir.OINDEXMAP)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return mapRType(pos, n.X.Type())
}
// MakeChanRType asserts that n is a "make" operation for a channel
// type, and returns an expression that yields the *runtime._type
// value representing that channel type.
func MakeChanRType(pos src.XPos, n *ir.MakeExpr) ir.Node {
assertOp(n, ir.OMAKECHAN)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return chanRType(pos, n.Type())
}
// MakeMapRType asserts that n is a "make" operation for a map type,
// and returns an expression that yields the *runtime._type value
// representing that map type.
func MakeMapRType(pos src.XPos, n *ir.MakeExpr) ir.Node {
assertOp(n, ir.OMAKEMAP)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return mapRType(pos, n.Type())
}
// MakeSliceElemRType asserts that n is a "make" operation for a slice
// type, and returns an expression that yields the *runtime._type
// value representing that slice type's element type.
func MakeSliceElemRType(pos src.XPos, n *ir.MakeExpr) ir.Node {
assertOp2(n, ir.OMAKESLICE, ir.OMAKESLICECOPY)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return sliceElemRType(pos, n.Type())
}
// RangeMapRType asserts that n is a "range" loop over a map value,
// and returns an expression that yields the *runtime._type value
// representing that map type.
func RangeMapRType(pos src.XPos, n *ir.RangeStmt) ir.Node {
assertOp(n, ir.ORANGE)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return mapRType(pos, n.X.Type())
}
// UnsafeSliceElemRType asserts that n is an "unsafe.Slice" operation,
// and returns an expression that yields the *runtime._type value
// representing the result slice type's element type.
func UnsafeSliceElemRType(pos src.XPos, n *ir.BinaryExpr) ir.Node {
assertOp(n, ir.OUNSAFESLICE)
if hasRType(n, n.RType, "RType") {
return n.RType
}
return sliceElemRType(pos, n.Type())
}

View File

@ -842,9 +842,15 @@ func TypeLinksym(t *types.Type) *obj.LSym {
return TypeSym(t).Linksym()
}
// Deprecated: Use TypePtrAt instead.
func TypePtr(t *types.Type) *ir.AddrExpr {
n := ir.NewLinksymExpr(base.Pos, TypeLinksym(t), types.Types[types.TUINT8])
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
return TypePtrAt(base.Pos, t)
}
// TypePtrAt returns an expression that evaluates to the
// *runtime._type value for t.
func TypePtrAt(pos src.XPos, t *types.Type) *ir.AddrExpr {
return typecheck.LinksymAddr(pos, TypeLinksym(t), types.Types[types.TUINT8])
}
// ITabLsym returns the LSym representing the itab for concrete type typ implementing
@ -864,9 +870,15 @@ func ITabLsym(typ, iface *types.Type) *obj.LSym {
return lsym
}
// ITabAddr returns an expression representing a pointer to the itab
// for concrete type typ implementing interface iface.
// Deprecated: Use ITabAddrAt instead.
func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
return ITabAddrAt(base.Pos, typ, iface)
}
// ITabAddrAt returns an expression that evaluates to the
// *runtime.itab value for concrete type typ implementing interface
// iface.
func ITabAddrAt(pos src.XPos, typ, iface *types.Type) *ir.AddrExpr {
s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
lsym := s.Linksym()
@ -874,8 +886,7 @@ func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
writeITab(lsym, typ, iface, false)
}
n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
return typecheck.LinksymAddr(pos, lsym, types.Types[types.TUINT8])
}
// needkeyupdate reports whether map updates with t as a key

View File

@ -76,7 +76,7 @@ func TestDebugLinesPushback(t *testing.T) {
fn := "(*List[go.shape.int_0]).PushBack"
if buildcfg.Experiment.Unified {
// Unified mangles differently
fn = "(*List[int]).PushBack"
fn = "(*List[int]).PushBack-shaped"
}
testDebugLines(t, "-N -l", "pushback.go", fn, []int{17, 18, 19, 20, 21, 22, 24}, true)
}
@ -95,7 +95,7 @@ func TestDebugLinesConvert(t *testing.T) {
fn := "G[go.shape.int_0]"
if buildcfg.Experiment.Unified {
// Unified mangles differently
fn = "G[int]"
fn = "G[int]-shaped"
}
testDebugLines(t, "-N -l", "convertline.go", fn, []int{9, 10, 11}, true)
}

View File

@ -664,7 +664,7 @@ func (s *state) paramsToHeap() {
// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil))
}
// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
@ -692,23 +692,26 @@ func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
}
// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type) *ssa.Value {
func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value {
if typ.Size() == 0 {
return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
}
return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
if rtype == nil {
rtype = s.reflectType(typ)
}
return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0]
}
func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) {
if !n.Type().IsPtr() {
s.Fatalf("expected pointer type: %v", n.Type())
}
elem := n.Type().Elem()
elem, rtypeExpr := n.Type().Elem(), n.ElemRType
if count != nil {
if !elem.IsArray() {
s.Fatalf("expected array type: %v", elem)
}
elem = elem.Elem()
elem, rtypeExpr = elem.Elem(), n.ElemElemRType
}
size := elem.Size()
// Casting from larger type to smaller one is ok, so for smallest type, do nothing.
@ -721,12 +724,20 @@ func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value
if count.Type.Size() != s.config.PtrSize {
s.Fatalf("expected count fit to an uintptr size, have: %d, want: %d", count.Type.Size(), s.config.PtrSize)
}
s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, s.reflectType(elem), count)
var rtype *ssa.Value
if rtypeExpr != nil {
rtype = s.expr(rtypeExpr)
} else {
rtype = s.reflectType(elem)
}
s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, rtype, count)
}
// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
// TODO(mdempsky): Make this Fatalf under Unified IR; frontend needs
// to supply RType expressions.
lsym := reflectdata.TypeLinksym(typ)
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}
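
Source-level examples (ordinary Go, illustrative only) of the -d=checkptr-instrumented conversions whose element descriptors now arrive via ConvExpr.ElemRType and ElemElemRType rather than being recomputed here:

package main

import "unsafe"

// Both conversion shapes below are instrumented by checkPtrAlignment
// when building with -d=checkptr (enabled by default under -race and
// -msan).
func reinterpret(p unsafe.Pointer, n int) ([]int64, *int64) {
    one := (*int64)(p)           // *Elem form: ElemRType describes int64
    many := (*[4]int64)(p)[:n:n] // *[Len]Elem form: ElemElemRType describes int64
    return many, one
}

func main() {
    var buf [8]int64
    s, p := reinterpret(unsafe.Pointer(&buf[0]), 4)
    _ = s
    _ = p
}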
@ -3294,7 +3305,11 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
case ir.ONEW:
n := n.(*ir.UnaryExpr)
return s.newObject(n.Type().Elem())
var rtype *ssa.Value
if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE {
rtype = s.expr(x.RType)
}
return s.newObject(n.Type().Elem(), rtype)
case ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
@ -6226,12 +6241,15 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
if n.ITab != nil {
targetItab = s.expr(n.ITab)
}
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok)
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok)
}
func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X)
var target, targetItab *ssa.Value
var source, target, targetItab *ssa.Value
if n.SrcRType != nil {
source = s.expr(n.SrcRType)
}
if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
byteptr := s.f.Config.Types.BytePtr
targetItab = s.expr(n.ITab)
@ -6241,15 +6259,16 @@ func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res,
} else {
target = s.expr(n.RType)
}
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok)
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok)
}
// dottype1 implements a x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
// and src is the type we're asserting from.
// source is the *runtime._type of src
// target is the *runtime._type of dst.
// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
byteptr := s.f.Config.Types.BytePtr
if dst.IsInterface() {
if dst.IsEmptyInterface() {
@ -6385,7 +6404,10 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targ
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.reflectType(src)
taddr := source
if taddr == nil {
taddr = s.reflectType(src)
}
if src.IsEmptyInterface() {
s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
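
For context, a source-level example (illustrative) of an assertion that lowers to a dynamic type assertion under unified IR's dictionary-based generics; the SrcRType and RType operands are supplied by the compiler:

package main

import "fmt"

// Inside a generic function, asserting to the type parameter T cannot
// be resolved statically; it lowers to a dynamic type assertion whose
// *runtime._type operands (including the source type used in the
// panic message) come from the dictionary.
func assertTo[T any](x any) T {
    return x.(T) // panics, naming x's dynamic type, if x does not hold a T
}

func main() {
    fmt.Println(assertTo[int](42))
}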

View File

@ -405,7 +405,7 @@ func (w *exportWriter) writeIndex(index map[*types.Sym]uint64, mainIndex bool) {
w.string(exportPath(pkg))
if mainIndex {
w.string(pkg.Name)
w.uint64(uint64(pkg.Height))
w.uint64(0) // was package height, but not necessary anymore.
}
// Sort symbols within a package by name.
@ -1978,6 +1978,7 @@ func (w *exportWriter) expr(n ir.Node) {
w.pos(n.Pos())
w.typ(n.Type())
w.expr(n.X)
w.bool(n.Implicit())
case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC:
n := n.(*ir.UnaryExpr)

View File

@ -175,10 +175,9 @@ func ReadImports(pkg *types.Pkg, data string) {
for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
pkg := p.pkgAt(ird.uint64())
pkgName := p.stringAt(ird.uint64())
pkgHeight := int(ird.uint64())
_ = int(ird.uint64()) // was package height, but not necessary anymore.
if pkg.Name == "" {
pkg.Name = pkgName
pkg.Height = pkgHeight
types.NumImport[pkgName]++
// TODO(mdempsky): This belongs somewhere else.
@ -187,9 +186,6 @@ func ReadImports(pkg *types.Pkg, data string) {
if pkg.Name != pkgName {
base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
}
if pkg.Height != pkgHeight {
base.Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
}
}
for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
@ -1493,7 +1489,9 @@ func (r *importReader) node() ir.Node {
return n
case ir.OCONV, ir.OCONVIFACE, ir.OCONVIDATA, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR:
return ir.NewConvExpr(r.pos(), op, r.typ(), r.expr())
n := ir.NewConvExpr(r.pos(), op, r.typ(), r.expr())
n.SetImplicit(r.bool())
return n
case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN, ir.OUNSAFEADD, ir.OUNSAFESLICE:
pos := r.pos()

View File

@ -13,6 +13,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
@ -119,6 +120,13 @@ func ComputeAddrtaken(top []ir.Node) {
}
}
// LinksymAddr returns a new expression that evaluates to the address
// of lsym. typ specifies the type of the addressed memory.
func LinksymAddr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *ir.AddrExpr {
n := ir.NewLinksymExpr(pos, lsym, typ)
return Expr(NodAddrAt(pos, n)).(*ir.AddrExpr)
}
func NodNil() ir.Node {
n := ir.NewNilExpr(base.Pos)
n.SetType(types.Types[types.TNIL])
@ -293,24 +301,14 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
n = convlit1(n, t, false, context)
if n.Type() == nil {
return n
base.Fatalf("cannot assign %v to %v", n, t)
}
if n.Type().IsUntyped() {
base.Fatalf("%L has untyped type", n)
}
if t.Kind() == types.TBLANK {
return n
}
// Convert ideal bool from comparison to plain bool
// if the next step is non-bool (like interface{}).
if n.Type() == types.UntypedBool && !t.IsBoolean() {
if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
r.SetType(types.Types[types.TBOOL])
r.SetTypecheck(1)
r.SetImplicit(true)
n = r
}
}
if types.Identical(n.Type(), t) {
return n
}

View File

@ -16,9 +16,6 @@ import (
// pkgMap maps a package path to a package.
var pkgMap = make(map[string]*Pkg)
// MaxPkgHeight is a height greater than any likely package height.
const MaxPkgHeight = 1e9
type Pkg struct {
Path string // string literal used in import statement, e.g. "runtime/internal/sys"
Name string // package name, e.g. "sys"
@ -26,12 +23,6 @@ type Pkg struct {
Syms map[string]*Sym
Pathsym *obj.LSym
// Height is the package's height in the import graph. Leaf
// packages (i.e., packages with no imports) have height 0,
// and all other packages have height 1 plus the maximum
// height of their imported packages.
Height int
Direct bool // imported directly
}

View File

@ -97,14 +97,7 @@ func (sym *Sym) LinksymABI(abi obj.ABI) *obj.LSym {
// Less reports whether symbol a is ordered before symbol b.
//
// Symbols are ordered exported before non-exported, then by name, and
// finally (for non-exported symbols) by package height and path.
//
// Ordering by package height is necessary to establish a consistent
// ordering for non-exported names with the same spelling but from
// different packages. We don't necessarily know the path for the
// package being compiled, but by definition it will have a height
// greater than any other packages seen within the compilation unit.
// For more background, see issue #24693.
// finally (for non-exported symbols) by package path.
func (a *Sym) Less(b *Sym) bool {
if a == b {
return false
@ -131,9 +124,6 @@ func (a *Sym) Less(b *Sym) bool {
return a.Name < b.Name
}
if !ea {
if a.Pkg.Height != b.Pkg.Height {
return a.Pkg.Height < b.Pkg.Height
}
return a.Pkg.Path < b.Pkg.Path
}
return false
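
A simplified, standalone sketch of the ordering described above (a plain stand-in struct instead of *types.Sym; the real Less has additional special cases), showing that package height no longer participates:

package main

import (
    "fmt"
    "sort"
)

// sym is a hypothetical stand-in for *types.Sym.
type sym struct {
    name, pkgPath string
    exported      bool
}

// less orders exported symbols before non-exported ones, then by name,
// and finally (non-exported only) by package path.
func less(a, b sym) bool {
    if a.exported != b.exported {
        return a.exported
    }
    if a.name != b.name {
        return a.name < b.name
    }
    if !a.exported {
        return a.pkgPath < b.pkgPath
    }
    return false
}

func main() {
    syms := []sym{
        {name: "x", pkgPath: "b.example/p2"},
        {name: "X", pkgPath: "b.example/p2", exported: true},
        {name: "x", pkgPath: "a.example/p1"},
    }
    sort.Slice(syms, func(i, j int) bool { return less(syms[i], syms[j]) })
    fmt.Println(syms)
}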

View File

@ -189,7 +189,7 @@ func (obj *object) sameId(pkg *Package, name string) bool {
//
// Objects are ordered nil before non-nil, exported before
// non-exported, then by name, and finally (for non-exported
// functions) by package height and path.
// functions) by package path.
func (a *object) less(b *object) bool {
if a == b {
return false
@ -215,9 +215,6 @@ func (a *object) less(b *object) bool {
return a.name < b.name
}
if !ea {
if a.pkg.height != b.pkg.height {
return a.pkg.height < b.pkg.height
}
return a.pkg.path < b.pkg.path
}

View File

@ -14,7 +14,6 @@ type Package struct {
name string
scope *Scope
imports []*Package
height int
complete bool
fake bool // scope lookup errors are silently dropped if package is fake (internal use only)
cgo bool // uses of this package will be rewritten into uses of declarations from _cgo_gotypes.go
@ -23,14 +22,8 @@ type Package struct {
// NewPackage returns a new Package for the given package path and name.
// The package is not complete and contains no explicit imports.
func NewPackage(path, name string) *Package {
return NewPackageHeight(path, name, 0)
}
// NewPackageHeight is like NewPackage, but allows specifying the
// package's height.
func NewPackageHeight(path, name string, height int) *Package {
scope := NewScope(Universe, nopos, nopos, fmt.Sprintf("package %q", path))
return &Package{path: path, name: name, scope: scope, height: height}
return &Package{path: path, name: name, scope: scope}
}
// Path returns the package path.
@ -39,9 +32,6 @@ func (pkg *Package) Path() string { return pkg.path }
// Name returns the package name.
func (pkg *Package) Name() string { return pkg.name }
// Height returns the package height.
func (pkg *Package) Height() int { return pkg.height }
// SetName sets the package name.
func (pkg *Package) SetName(name string) { pkg.name = name }

View File

@ -197,7 +197,6 @@ func (check *Checker) importPackage(pos syntax.Pos, path, dir string) *Package {
// methods with receiver base type names.
func (check *Checker) collectObjects() {
pkg := check.pkg
pkg.height = 0
// pkgImports is the set of packages already imported by any package file seen
// so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate
@ -255,15 +254,6 @@ func (check *Checker) collectObjects() {
continue
}
if imp == Unsafe {
// typecheck ignores imports of package unsafe for
// calculating height.
// TODO(mdempsky): Revisit this. This seems fine, but I
// don't remember explicitly considering this case.
} else if h := imp.height + 1; h > pkg.height {
pkg.height = h
}
// local name overrides imported package name
name := imp.name
if s.LocalPkgName != nil {

View File

@ -47,7 +47,7 @@ func TestSizeof(t *testing.T) {
// Misc
{Scope{}, 60, 104},
{Package{}, 40, 80},
{Package{}, 36, 72},
{_TypeSet{}, 28, 56},
}

View File

@ -99,10 +99,11 @@ func walkAssign(init *ir.Nodes, n ir.Node) ir.Node {
}
as.Y = r
if r.Op() == ir.OAPPEND {
r := r.(*ir.CallExpr)
// Left in place for back end.
// Do not add a new write barrier.
// Set up address of type for back end.
r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem())
r.X = reflectdata.AppendElemRType(base.Pos, r)
return as
}
// Otherwise, lowered for race detector.
@ -169,11 +170,11 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
var call *ir.CallExpr
if w := t.Elem().Size(); w <= zeroValSize {
fn := mapfn(mapaccess2[fast], t, false)
call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key)
call = mkcall1(fn, fn.Type().Results(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key)
} else {
fn := mapfn("mapaccess2_fat", t, true)
z := reflectdata.ZeroAddr(w)
call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z)
call = mkcall1(fn, fn.Type().Results(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z)
}
// mapaccess2* returns a typed bool, but due to spec changes,
@ -502,7 +503,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.AppendElemRType(base.Pos, n), s, nn))}
nodes.Append(nif)
// s = s[:n]
@ -523,7 +524,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
ptr2, len2 := backingArrayPtrLen(l2)
ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2)
ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.AppendElemRType(base.Pos, n), ptr1, len1, ptr2, len2)
} else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
// rely on runtime to instrument:
// copy(s[len(l1):], l2)
@ -670,7 +671,7 @@ func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.AppendElemRType(base.Pos, n), s, nn))}
nodes = append(nodes, nif)
// s = s[:n]

View File

@ -87,7 +87,7 @@ func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.AppendElemRType(base.Pos, n), ns,
ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
l = append(l, nif)
@ -141,7 +141,7 @@ func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
ptrL, lenL := backingArrayPtrLen(n.X)
n.Y = cheapExpr(n.Y, init)
ptrR, lenR := backingArrayPtrLen(n.Y)
return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
return mkcall1(fn, n.Type(), init, reflectdata.CopyElemRType(base.Pos, n), ptrL, lenL, ptrR, lenR)
}
if runtimecall {
@ -214,7 +214,7 @@ func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
t := map_.Type()
fast := mapfast(t)
key = mapKeyArg(fast, n, key, false)
return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.DeleteMapRType(base.Pos, n), map_, key)
}
// walkLenCap walks an OLEN or OCAP node.
@ -258,7 +258,7 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
argtype = types.Types[types.TINT]
}
return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.MakeChanRType(base.Pos, n), typecheck.Conv(size, argtype))
}
// walkMakeMap walks an OMAKEMAP node.
@ -356,7 +356,7 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
fn := typecheck.LookupRuntime(fnname)
fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
}
// walkMakeSlice walks an OMAKESLICE node.
@ -421,7 +421,7 @@ func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
argtype = types.Types[types.TINT]
}
fn := typecheck.LookupRuntime(fnname)
ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
ptr.MarkNonNil()
len = typecheck.Conv(len, types.Types[types.TINT])
cap = typecheck.Conv(cap, types.Types[types.TINT])
@ -475,7 +475,7 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// Replace make+copy with runtime.makeslicecopy.
// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
fn := typecheck.LookupRuntime("makeslicecopy")
ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
ptr.MarkNonNil()
sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
return walkExpr(typecheck.Expr(sh), init)
@ -658,7 +658,7 @@ func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
if ir.ShouldCheckPtr(ir.CurFunc, 1) {
fnname := "unsafeslicecheckptr"
fn := typecheck.LookupRuntime(fnname)
init.Append(mkcall1(fn, nil, init, reflectdata.TypePtr(sliceType.Elem()), unsafePtr, typecheck.Conv(len, lenType)))
init.Append(mkcall1(fn, nil, init, reflectdata.UnsafeSliceElemRType(base.Pos, n), unsafePtr, typecheck.Conv(len, lenType)))
} else {
// Otherwise, open code unsafe.Slice to prevent runtime call overhead.
// Keep this code in sync with runtime.unsafeslice{,64}

View File

@ -54,6 +54,10 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
// Given mixed interface/concrete comparison,
// rewrite into types-equal && data-equal.
// This is efficient, avoids allocations, and avoids runtime calls.
//
// TODO(mdempsky): It would be more general and probably overall
// simpler to just extend walkCompareInterface to optimize when one
// operand is an OCONVIFACE.
if n.X.Type().IsInterface() != n.Y.Type().IsInterface() {
// Preserve side-effects in case of short-circuiting; see #32187.
l := cheapExpr(n.X, init)
@ -74,9 +78,12 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
// l.tab == type(r)
// For non-empty interface, this is:
// l.tab != nil && l.tab._type == type(r)
//
// TODO(mdempsky): For non-empty interface comparisons, just
// compare against the itab address directly?
var eqtype ir.Node
tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
rtyp := reflectdata.TypePtr(r.Type())
rtyp := reflectdata.CompareRType(base.Pos, n)
if l.Type().IsEmptyInterface() {
tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
tab.SetTypecheck(1)
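
The mixed interface/concrete comparison rewrite above implements "types-equal && data-equal". The same rule is visible in ordinary Go: an interface value compares equal to a concrete operand only when the dynamic type matches, so the data is only consulted after the type word. A minimal, standalone illustration (not compiler code):

package main

import "fmt"

func main() {
	var i any = 3 // dynamic type int, data 3

	fmt.Println(i == 3)        // true: type words match (int), then data is compared
	fmt.Println(i == int64(3)) // false: dynamic types differ (int vs int64); data is never compared
}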

View File

@ -414,9 +414,10 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
// make the map var
a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil)
args := []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(n.Len + int64(len(n.List)))}
a := typecheck.Expr(ir.NewCallExpr(base.Pos, ir.OMAKE, nil, args)).(*ir.MakeExpr)
a.RType = n.RType
a.SetEsc(n.Esc())
a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(n.Len + int64(len(n.List)))}
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, m, a))
entries := n.List
@ -467,14 +468,18 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
kidx := ir.NewIndexExpr(base.Pos, vstatk, i)
kidx.SetBounded(true)
lhs := ir.NewIndexExpr(base.Pos, m, kidx)
// typechecker rewrites OINDEX to OINDEXMAP
lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, kidx)).(*ir.IndexExpr)
base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)
lhs.RType = n.RType
zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(tk.NumElem()))
incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
var body ir.Node = ir.NewAssignStmt(base.Pos, lhs, rhs)
body = typecheck.Stmt(body) // typechecker rewrites OINDEX to OINDEXMAP
body = typecheck.Stmt(body)
body = orderStmtInPlace(body, map[string][]*ir.Name{})
loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil)
@ -503,8 +508,14 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem))
ir.SetPos(tmpelem)
var a ir.Node = ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem)
a = typecheck.Stmt(a) // typechecker rewrites OINDEX to OINDEXMAP
// typechecker rewrites OINDEX to OINDEXMAP
lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, tmpkey)).(*ir.IndexExpr)
base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)
lhs.RType = n.RType
var a ir.Node = ir.NewAssignStmt(base.Pos, lhs, tmpelem)
a = typecheck.Stmt(a)
a = orderStmtInPlace(a, map[string][]*ir.Name{})
appendWalkStmt(init, a)
}

View File

@ -14,7 +14,6 @@ import (
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
"cmd/internal/sys"
)
@ -50,13 +49,8 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
}
if !fromType.IsInterface() {
var typeWord ir.Node
if toType.IsEmptyInterface() {
typeWord = reflectdata.TypePtr(fromType)
} else {
typeWord = reflectdata.ITabAddr(fromType, toType)
}
l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, dataWord(n.Pos(), n.X, init, n.Esc() != ir.EscNone))
typeWord := reflectdata.ConvIfaceTypeWord(base.Pos, n)
l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, dataWord(n, init))
l.SetType(toType)
l.SetTypecheck(n.Typecheck())
return l
@ -95,7 +89,7 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
fn := typecheck.LookupRuntime("convI2I")
types.CalcSize(fn.Type())
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
call.Args = []ir.Node{reflectdata.TypePtr(toType), itab}
call.Args = []ir.Node{reflectdata.ConvIfaceTypeWord(base.Pos, n), itab}
typeWord = walkExpr(typecheck.Expr(call), init)
}
@ -107,10 +101,10 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
return e
}
// Returns the data word (the second word) used to represent n in an interface.
// n must not be of interface type.
// esc describes whether the result escapes.
func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node {
// Returns the data word (the second word) used to represent conv.X in
// an interface.
func dataWord(conv *ir.ConvExpr, init *ir.Nodes) ir.Node {
pos, n := conv.Pos(), conv.X
fromType := n.Type()
// If it's a pointer, it is its own representation.
@ -150,7 +144,7 @@ func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node {
case n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PEXTERN && n.(*ir.Name).Readonly():
// n is a readonly global; use it directly.
value = n
case !escapes && fromType.Size() <= 1024:
case conv.Esc() == ir.EscNone && fromType.Size() <= 1024:
// n does not escape. Use a stack temporary initialized to n.
value = typecheck.Temp(fromType)
init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n)))
@ -176,7 +170,7 @@ func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node {
n = copyExpr(n, fromType, init)
}
fn = typecheck.SubstArgTypes(fn, fromType)
args = []ir.Node{reflectdata.TypePtr(fromType), typecheck.NodAddr(n)}
args = []ir.Node{reflectdata.ConvIfaceSrcRType(base.Pos, conv), typecheck.NodAddr(n)}
} else {
// Use a specialized conversion routine that takes the type being
// converted by value, not by pointer.
@ -211,7 +205,7 @@ func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node {
// walkConvIData walks an OCONVIDATA node.
func walkConvIData(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
return dataWord(n.Pos(), n.X, init, n.Esc() != ir.EscNone)
return dataWord(n, init)
}
// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
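
dataWord above builds the second word of an interface value; ConvIfaceTypeWord supplies the first. As a standalone sketch of that two-word layout, the program below mirrors the runtime's empty-interface representation with a local eface struct; the layout is an implementation detail and is assumed here only for illustration:

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the runtime's empty-interface layout: a type word followed by
// a data word. This is an implementation detail, not a stable API.
type eface struct {
	typ  unsafe.Pointer // type word: describes the dynamic type
	data unsafe.Pointer // data word: points at (or encodes) the value
}

func main() {
	x := 42
	var i any = x
	e := *(*eface)(unsafe.Pointer(&i))
	fmt.Printf("type word %p, data word %p\n", e.typ, e.data)
}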

View File

@ -782,7 +782,7 @@ func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
t := map_.Type()
fast := mapfast(t)
key := mapKeyArg(fast, n, n.Index, n.Assigned)
args := []ir.Node{reflectdata.TypePtr(t), map_, key}
args := []ir.Node{reflectdata.IndexMapRType(base.Pos, n), map_, key}
var mapFn ir.Node
switch {

View File

@ -993,7 +993,7 @@ func (o *orderState) stmt(n ir.Node) {
do(0, recv.X.Type().Elem())
do(1, types.Types[types.TBOOL])
if len(init) != 0 {
ir.DumpList("ninit", r.Init())
ir.DumpList("ninit", init)
base.Fatalf("ninit on select recv")
}
orderBlock(ncas.PtrInit(), o.free)
@ -1456,8 +1456,12 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
// Emit eval+insert of dynamic entries, one at a time.
for _, r := range dynamics {
as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, r.Key)).(*ir.IndexExpr)
base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)
lhs.RType = n.RType
as := ir.NewAssignStmt(base.Pos, lhs, r.Value)
typecheck.Stmt(as)
o.stmt(as)
}

View File

@ -38,11 +38,7 @@ func cheapComputableIndex(width int64) bool {
// the returned node.
func walkRange(nrange *ir.RangeStmt) ir.Node {
if isMapClear(nrange) {
m := nrange.X
lno := ir.SetPos(m)
n := mapClear(m)
base.Pos = lno
return n
return mapClear(nrange)
}
nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil)
@ -107,7 +103,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
// for v1 := range ha { body }
if v2 == nil {
body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
body = []ir.Node{rangeAssign(nrange, hv1)}
break
}
@ -116,10 +112,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
// v1, v2 = hv1, ha[hv1]
tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
tmp.SetBounded(true)
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1, tmp})
body = []ir.Node{a}
body = []ir.Node{rangeAssign2(nrange, hv1, tmp)}
break
}
@ -144,9 +137,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
tmp.SetBounded(true)
init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp)))
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)})
a := rangeAssign2(nrange, hv1, ir.NewStarExpr(base.Pos, hp))
body = append(body, a)
// Advance pointer as part of the late increment.
@ -172,7 +163,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
fn := typecheck.LookupRuntime("mapiterinit")
fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
init = append(init, mkcallstmt1(fn, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
fn = typecheck.LookupRuntime("mapiternext")
@ -183,11 +174,10 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
if v1 == nil {
body = nil
} else if v2 == nil {
body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)}
body = []ir.Node{rangeAssign(nrange, key)}
} else {
elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{key, elem})
body = []ir.Node{a}
body = []ir.Node{rangeAssign2(nrange, key, elem)}
}
case types.TCHAN:
@ -210,7 +200,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
if v1 == nil {
body = nil
} else {
body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
body = []ir.Node{rangeAssign(nrange, hv1)}
}
// Zero hv1. This prevents hv1 from being the sole, inaccessible
// reference to an otherwise GC-able value during the next channel receive.
@ -275,11 +265,10 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
if v1 != nil {
if v2 != nil {
// v1, v2 = hv1t, hv2
a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1t, hv2})
body = append(body, a)
body = append(body, rangeAssign2(nrange, hv1t, hv2))
} else {
// v1 = hv1t
body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t))
body = append(body, rangeAssign(nrange, hv1t))
}
}
}
@ -314,6 +303,36 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
return n
}
// rangeAssign returns "n.Key = key".
func rangeAssign(n *ir.RangeStmt, key ir.Node) ir.Node {
key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
return ir.NewAssignStmt(n.Pos(), n.Key, key)
}
// rangeAssign2 returns "n.Key, n.Value = key, value".
func rangeAssign2(n *ir.RangeStmt, key, value ir.Node) ir.Node {
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] = range".
key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
value = rangeConvert(n, n.Value.Type(), value, n.ValueTypeWord, n.ValueSrcRType)
return ir.NewAssignListStmt(n.Pos(), ir.OAS2, []ir.Node{n.Key, n.Value}, []ir.Node{key, value})
}
// rangeConvert returns src, converted to dst if necessary. If a
// conversion is necessary, then typeWord and srcRType are copied to
// their respective ConvExpr fields.
func rangeConvert(nrange *ir.RangeStmt, dst *types.Type, src, typeWord, srcRType ir.Node) ir.Node {
src = typecheck.Expr(src)
if dst.Kind() == types.TBLANK || types.Identical(dst, src.Type()) {
return src
}
n := ir.NewConvExpr(nrange.Pos(), ir.OCONV, dst, src)
n.TypeWord = typeWord
n.SrcRType = srcRType
return typecheck.Expr(n)
}
// isMapClear checks if n is of the form:
//
// for k := range m {
@ -360,13 +379,17 @@ func isMapClear(n *ir.RangeStmt) bool {
}
// mapClear constructs a call to runtime.mapclear for the map m.
func mapClear(m ir.Node) ir.Node {
func mapClear(nrange *ir.RangeStmt) ir.Node {
m := nrange.X
origPos := ir.SetPos(m)
defer func() { base.Pos = origPos }()
t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
fn := typecheck.LookupRuntime("mapclear")
fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
n := mkcallstmt1(fn, reflectdata.TypePtr(t), m)
n := mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), m)
return walkStmt(typecheck.Stmt(n))
}
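
rangeAssign2 keeps the OAS2 form so that statements like "for v1, a[v1] = range" follow the spec's two-phase tuple-assignment rules: index operands on the left are evaluated before the new key is stored. A small runnable illustration of that ordering (values chosen arbitrarily):

package main

import "fmt"

func main() {
	s := []int{10, 20, 30}
	a := make([]int, 4)
	i := 3

	// Each iteration assigns as in "i, a[i] = k, s[k]": the index operand in
	// a[i] is evaluated before the new key is stored into i.
	for i, a[i] = range s {
	}

	// With this input: i == 2 and a == [20 30 0 10]; the first element landed
	// in a[3] because i was still 3 when it was stored.
	fmt.Println(i, a)
}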

View File

@ -85,8 +85,12 @@ func walkSwitchExpr(sw *ir.SwitchStmt) {
defaultGoto = jmp
}
for _, n1 := range ncase.List {
s.Add(ncase.Pos(), n1, jmp)
for i, n1 := range ncase.List {
var rtype ir.Node
if i < len(ncase.RTypes) {
rtype = ncase.RTypes[i]
}
s.Add(ncase.Pos(), n1, rtype, jmp)
}
// Process body.
@ -124,11 +128,12 @@ type exprSwitch struct {
type exprClause struct {
pos src.XPos
lo, hi ir.Node
rtype ir.Node // *runtime._type for OEQ node
jmp ir.Node
}
func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
func (s *exprSwitch) Add(pos src.XPos, expr, rtype, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, rtype: rtype, jmp: jmp}
if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
return
@ -233,7 +238,7 @@ func (s *exprSwitch) flush() {
// Add length case to outer switch.
cas := ir.NewBasicLit(pos, constant.MakeInt64(runLen(run)))
jmp := ir.NewBranchStmt(pos, ir.OGOTO, label)
outer.Add(pos, cas, jmp)
outer.Add(pos, cas, nil, jmp)
}
s.done.Append(ir.NewLabelStmt(s.pos, outerLabel))
outer.Emit(&s.done)
@ -342,7 +347,9 @@ func (c *exprClause) test(exprname ir.Node) ir.Node {
}
}
return ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo)
n := ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo)
n.RType = c.rtype
return n
}
func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {

View File

@ -583,6 +583,30 @@ func TestIssue13566(t *testing.T) {
}
}
func TestTypeNamingOrder(t *testing.T) {
skipSpecialPlatforms(t)
// This package only handles gc export data.
if runtime.Compiler != "gc" {
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
// On windows, we have to set the -D option for the compiler to avoid having a drive
// letter and an illegal ':' in the import path - just skip it (see also issue #3483).
if runtime.GOOS == "windows" {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
tmpdir := mktmpdir(t)
defer os.RemoveAll(tmpdir)
testoutdir := filepath.Join(tmpdir, "testdata")
compile(t, "testdata", "g.go", testoutdir)
// import must succeed (test for issue at hand)
_ = importPkg(t, "./testdata/g", tmpdir)
}
func TestIssue13898(t *testing.T) {
skipSpecialPlatforms(t)

View File

@ -0,0 +1,23 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Input for TestTypeNamingOrder
// ensures that the order in which "type A B" declarations are
// processed is correct; this was a problem for unified IR imports.
package g
type Client struct {
common service
A *AService
B *BService
}
type service struct {
client *Client
}
type AService service
type BService service

View File

@ -31,6 +31,8 @@ type pkgReader struct {
// laterFns holds functions that need to be invoked at the end of
// import reading.
laterFns []func()
// laterFors is used in case of 'type A B' to ensure that B is processed before A.
laterFors map[types.Type]int
}
// later adds a function to be invoked at the end of import reading.
@ -38,6 +40,15 @@ func (pr *pkgReader) later(fn func()) {
pr.laterFns = append(pr.laterFns, fn)
}
// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
func (pr *pkgReader) laterFor(t types.Type, fn func()) {
if pr.laterFors == nil {
pr.laterFors = make(map[types.Type]int)
}
pr.laterFors[t] = len(pr.laterFns)
pr.laterFns = append(pr.laterFns, fn)
}
// readUnifiedPackage reads a package description from the given
// unified IR export data decoder.
func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
@ -60,7 +71,7 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
r.Bool() // has init
r.Bool() // TODO(mdempsky): Remove; was "has init"
for i, n := 0, r.Len(); i < n; i++ {
// As if r.obj(), but avoiding the Scope.Lookup call,
@ -198,21 +209,49 @@ func (r *reader) doPkg() *types.Package {
}
name := r.String()
height := r.Len()
// Was: "pkg := types.NewPackageHeight(path, name, height)"
pkg, _ := types.NewPackage(path, name), height
pkg := types.NewPackage(path, name)
r.p.imports[path] = pkg
imports := make([]*types.Package, r.Len())
for i := range imports {
imports[i] = r.pkg()
}
pkg.SetImports(imports)
// The documentation for (*types.Package).Imports requires
// flattening the import graph when reading from export data, as
// obviously incorrect as that is.
//
// TODO(mdempsky): Remove this if go.dev/issue/54096 is accepted.
pkg.SetImports(flattenImports(imports))
return pkg
}
// flattenImports returns the transitive closure of all imported
// packages rooted from pkgs.
func flattenImports(pkgs []*types.Package) []*types.Package {
var res []*types.Package
seen := make(map[*types.Package]bool)
var add func(pkg *types.Package)
add = func(pkg *types.Package) {
if seen[pkg] {
return
}
seen[pkg] = true
res = append(res, pkg)
for _, imp := range pkg.Imports() {
add(imp)
}
}
for _, pkg := range pkgs {
add(pkg)
}
return res
}
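
As a usage sketch of the flattening above: with a hypothetical import chain a -> b -> c built from the public go/types API, the closure contains the indirect import c, not just the direct import b.

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// Hypothetical import graph: a imports b, b imports c.
	a := types.NewPackage("example.com/a", "a")
	b := types.NewPackage("example.com/b", "b")
	c := types.NewPackage("example.com/c", "c")
	b.SetImports([]*types.Package{c})
	a.SetImports([]*types.Package{b})

	// Transitive closure over Imports(), as flattenImports computes it.
	seen := map[*types.Package]bool{}
	var res []*types.Package
	var add func(p *types.Package)
	add = func(p *types.Package) {
		if seen[p] {
			return
		}
		seen[p] = true
		res = append(res, p)
		for _, imp := range p.Imports() {
			add(imp)
		}
	}
	add(a)

	for _, p := range res {
		fmt.Println(p.Path()) // example.com/a, example.com/b, example.com/c
	}
}
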
// @@@ Types
func (r *reader) typ() types.Type {
@ -459,7 +498,15 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
// unit tests expected that), but cmd/compile doesn't care
// about it, so maybe we can avoid worrying about that here.
rhs := r.typ()
r.p.later(func() {
pk := r.p
pk.laterFor(named, func() {
// First be sure that the rhs is initialized, if it needs to be initialized.
delete(pk.laterFors, named) // prevent cycles
if i, ok := pk.laterFors[rhs]; ok {
f := pk.laterFns[i]
pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
f() // initialize RHS
}
underlying := rhs.Underlying()
named.SetUnderlying(underlying)
})
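
The laterFor/laterFns bookkeeping above guarantees that, for a chain like "type A B", B's deferred fixup runs before A's: A's function looks up B's pending entry, swaps in a no-op, and invokes it eagerly. A minimal standalone sketch of that scheduling idea, with hypothetical names (fixups, deferFor, force, runAll):

package main

import "fmt"

// fixups queues functions to run at the end of loading, while letting one
// entry force another, keyed entry to run first (exactly once).
type fixups struct {
	fns   []func()
	byKey map[string]int // key -> index of its pending entry in fns
}

func (f *fixups) deferFor(key string, fn func()) {
	if f.byKey == nil {
		f.byKey = make(map[string]int)
	}
	f.byKey[key] = len(f.fns)
	f.fns = append(f.fns, func() {
		delete(f.byKey, key) // running now; nothing needs to force it anymore
		fn()
	})
}

// force runs key's pending fixup immediately, if it has not run yet, and
// replaces it with a no-op so the final loop does not run it twice.
func (f *fixups) force(key string) {
	if i, ok := f.byKey[key]; ok {
		run := f.fns[i]
		f.fns[i] = func() {}
		run()
	}
}

func (f *fixups) runAll() {
	for _, fn := range f.fns {
		fn()
	}
}

func main() {
	var f fixups
	// Simulate "type A B" where A is queued first but needs B finished.
	f.deferFor("A", func() {
		f.force("B")
		fmt.Println("finish A (underlying copied from B)")
	})
	f.deferFor("B", func() { fmt.Println("finish B") })
	f.runAll() // prints "finish B", then "finish A ...", despite queue order
}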

View File

@ -18,6 +18,12 @@ import (
// A PkgDecoder provides methods for decoding a package's Unified IR
// export data.
type PkgDecoder struct {
// version is the file format version.
version uint32
// sync indicates whether the file uses sync markers.
sync bool
// pkgPath is the package path for the package to be decoded.
//
// TODO(mdempsky): Remove; unneeded since CL 391014.
@ -52,6 +58,9 @@ type PkgDecoder struct {
// TODO(mdempsky): Remove; unneeded since CL 391014.
func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
// SyncMarkers reports whether pr uses sync markers.
func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
// IR export data from input. pkgPath is the package path for the
// compilation unit that produced the export data.
@ -67,9 +76,18 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
r := strings.NewReader(input)
var version uint32
assert(binary.Read(r, binary.LittleEndian, &version) == nil)
assert(version == 0)
assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
switch pr.version {
default:
panic(fmt.Errorf("unsupported version: %v", pr.version))
case 0:
// no flags
case 1:
var flags uint32
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
pr.sync = flags&flagSyncMarkers != 0
}
assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
@ -215,7 +233,7 @@ func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
//
// If EnableSync is false, then Sync is a no-op.
func (r *Decoder) Sync(mWant SyncMarker) {
if !EnableSync {
if !r.common.sync {
return
}
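
The decoder above reads a uint32 version word and, for version 1, a uint32 flags word whose low bit records whether sync markers follow. A standalone sketch of writing and re-reading such a little-endian header (only these two words are modeled; the rest of the pkgbits layout is omitted):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const flagSyncMarkers = 1 << 0 // low bit of the flags word: sync markers present

func writeHeader(w *bytes.Buffer, version uint32, sync bool) {
	binary.Write(w, binary.LittleEndian, version)
	if version >= 1 {
		var flags uint32
		if sync {
			flags |= flagSyncMarkers
		}
		binary.Write(w, binary.LittleEndian, flags)
	}
}

func readHeader(r *bytes.Reader) (version uint32, sync bool, err error) {
	if err = binary.Read(r, binary.LittleEndian, &version); err != nil {
		return
	}
	switch version {
	case 0:
		// v0 has no flags word
	case 1:
		var flags uint32
		if err = binary.Read(r, binary.LittleEndian, &flags); err != nil {
			return
		}
		sync = flags&flagSyncMarkers != 0
	default:
		err = fmt.Errorf("unsupported version: %v", version)
	}
	return
}

func main() {
	var buf bytes.Buffer
	writeHeader(&buf, 1, true)

	version, sync, err := readHeader(bytes.NewReader(buf.Bytes()))
	fmt.Println(version, sync, err) // 1 true <nil>
}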

View File

@ -14,6 +14,16 @@ import (
"runtime"
)
// currentVersion is the current version number.
//
// - v0: initial prototype
//
// - v1: adds the flags uint32 word
//
// TODO(mdempsky): For the next version bump:
// - remove the legacy "has init" bool from the public root
const currentVersion uint32 = 1
// A PkgEncoder provides methods for encoding a package's Unified IR
// export data.
type PkgEncoder struct {
@ -25,15 +35,21 @@ type PkgEncoder struct {
// elems[RelocString][stringsIdx[s]] == s (if present).
stringsIdx map[string]Index
// syncFrames is the number of frames to write at each sync
// marker. A negative value means sync markers are omitted.
syncFrames int
}
// SyncMarkers reports whether pw uses sync markers.
func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
// NewPkgEncoder returns an initialized PkgEncoder.
//
// syncFrames is the number of caller frames that should be serialized
// at Sync points. Serializing additional frames results in larger
// export data files, but can help diagnosing desync errors in
// higher-level Unified IR reader/writer code.
// higher-level Unified IR reader/writer code. If syncFrames is
// negative, then sync markers are omitted entirely.
func NewPkgEncoder(syncFrames int) PkgEncoder {
return PkgEncoder{
stringsIdx: make(map[string]Index),
@ -51,7 +67,13 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
assert(binary.Write(out, binary.LittleEndian, x) == nil)
}
writeUint32(0) // version
writeUint32(currentVersion)
var flags uint32
if pw.SyncMarkers() {
flags |= flagSyncMarkers
}
writeUint32(flags)
// Write elemEndsEnds.
var sum uint32
@ -204,7 +226,7 @@ func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
}
func (w *Encoder) Sync(m SyncMarker) {
if !EnableSync {
if !w.p.SyncMarkers() {
return
}
@ -297,8 +319,14 @@ func (w *Encoder) Code(c Code) {
// section (if not already present), and then writing a relocation
// into the element bitstream.
func (w *Encoder) String(s string) {
w.StringRef(w.p.StringIdx(s))
}
// StringRef writes a reference to the given index, which must be a
// previously encoded string value.
func (w *Encoder) StringRef(idx Index) {
w.Sync(SyncString)
w.Reloc(RelocString, w.p.StringIdx(s))
w.Reloc(RelocString, idx)
}
// Strings encodes and writes a variable-length slice of strings into

View File

@ -0,0 +1,9 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
const (
flagSyncMarkers = 1 << iota // file format contains sync markers
)

View File

@ -10,17 +10,6 @@ import (
"strings"
)
// EnableSync controls whether sync markers are written into unified
// IR's export data format and also whether they're expected when
// reading them back in. They're inessential to the correct
// functioning of unified IR, but are helpful during development to
// detect mistakes.
//
// When sync is enabled, writer stack frames will also be included in
// the export data. Currently, a fixed number of frames are included,
// controlled by -d=syncframes (default 0).
const EnableSync = true
// fmtFrames formats a backtrace for reporting reader/writer desyncs.
func fmtFrames(pcs ...uintptr) []string {
res := make([]string, 0, len(pcs))
@ -109,6 +98,7 @@ const (
SyncExprs
SyncExpr
SyncExprType
SyncAssign
SyncOp
SyncFuncLit
SyncCompLit
@ -139,4 +129,8 @@ const (
SyncStmtsEnd
SyncLabel
SyncOptLabel
SyncMultiExpr
SyncRType
SyncConvRTTI
)

View File

@ -45,39 +45,40 @@ func _() {
_ = x[SyncExprs-35]
_ = x[SyncExpr-36]
_ = x[SyncExprType-37]
_ = x[SyncOp-38]
_ = x[SyncFuncLit-39]
_ = x[SyncCompLit-40]
_ = x[SyncDecl-41]
_ = x[SyncFuncBody-42]
_ = x[SyncOpenScope-43]
_ = x[SyncCloseScope-44]
_ = x[SyncCloseAnotherScope-45]
_ = x[SyncDeclNames-46]
_ = x[SyncDeclName-47]
_ = x[SyncStmts-48]
_ = x[SyncBlockStmt-49]
_ = x[SyncIfStmt-50]
_ = x[SyncForStmt-51]
_ = x[SyncSwitchStmt-52]
_ = x[SyncRangeStmt-53]
_ = x[SyncCaseClause-54]
_ = x[SyncCommClause-55]
_ = x[SyncSelectStmt-56]
_ = x[SyncDecls-57]
_ = x[SyncLabeledStmt-58]
_ = x[SyncUseObjLocal-59]
_ = x[SyncAddLocal-60]
_ = x[SyncLinkname-61]
_ = x[SyncStmt1-62]
_ = x[SyncStmtsEnd-63]
_ = x[SyncLabel-64]
_ = x[SyncOptLabel-65]
_ = x[SyncAssign-38]
_ = x[SyncOp-39]
_ = x[SyncFuncLit-40]
_ = x[SyncCompLit-41]
_ = x[SyncDecl-42]
_ = x[SyncFuncBody-43]
_ = x[SyncOpenScope-44]
_ = x[SyncCloseScope-45]
_ = x[SyncCloseAnotherScope-46]
_ = x[SyncDeclNames-47]
_ = x[SyncDeclName-48]
_ = x[SyncStmts-49]
_ = x[SyncBlockStmt-50]
_ = x[SyncIfStmt-51]
_ = x[SyncForStmt-52]
_ = x[SyncSwitchStmt-53]
_ = x[SyncRangeStmt-54]
_ = x[SyncCaseClause-55]
_ = x[SyncCommClause-56]
_ = x[SyncSelectStmt-57]
_ = x[SyncDecls-58]
_ = x[SyncLabeledStmt-59]
_ = x[SyncUseObjLocal-60]
_ = x[SyncAddLocal-61]
_ = x[SyncLinkname-62]
_ = x[SyncStmt1-63]
_ = x[SyncStmtsEnd-64]
_ = x[SyncLabel-65]
_ = x[SyncOptLabel-66]
}
const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprAssertTypeOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 228, 230, 237, 244, 248, 256, 265, 275, 292, 301, 309, 314, 323, 329, 336, 346, 355, 365, 375, 385, 390, 401, 412, 420, 428, 433, 441, 446, 454}
var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
func (i SyncMarker) String() string {
i -= 1

View File

@ -234,16 +234,6 @@ func dotTypeEscape2() { // #13805, #15796
*(&v) = x.(int)
*(&v), *(&ok) = y.(int)
}
{
i := 0
j := 0
var ok bool
var x interface{} = i // ERROR "i does not escape"
var y interface{} = j // ERROR "j does not escape"
sink = x.(int) // ERROR "x.\(int\) escapes to heap"
sink, *(&ok) = y.(int)
}
{
i := 0 // ERROR "moved to heap: i"
j := 0 // ERROR "moved to heap: j"

View File

@ -0,0 +1,25 @@
// errorcheck -0 -m -l
//go:build !goexperiment.unified
// +build !goexperiment.unified
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
var sink interface{}
func dotTypeEscape2() { // #13805, #15796
{
i := 0
j := 0
var ok bool
var x interface{} = i // ERROR "i does not escape"
var y interface{} = j // ERROR "j does not escape"
sink = x.(int) // ERROR "x.\(int\) escapes to heap"
// BAD: should be "y.\(int\) escapes to heap" too
sink, *(&ok) = y.(int)
}
}

View File

@ -0,0 +1,24 @@
// errorcheck -0 -m -l
//go:build goexperiment.unified
// +build goexperiment.unified
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
var sink interface{}
func dotTypeEscape2() { // #13805, #15796
{
i := 0
j := 0
var ok bool
var x interface{} = i // ERROR "i does not escape"
var y interface{} = j // ERROR "j does not escape"
sink = x.(int) // ERROR "x.\(int\) escapes to heap"
sink, *(&ok) = y.(int) // ERROR "autotmp_.* escapes to heap"
}
}

View File

@ -1,13 +0,0 @@
package Äfoo
var ÄbarV int = 101
func Äbar(x int) int {
defer func() { ÄbarV += 3 }()
return Äblix(x)
}
func Äblix(x int) int {
defer func() { ÄbarV += 9 }()
return ÄbarV + x
}

View File

@ -1,13 +0,0 @@
package main
import (
"fmt"
"./Äfoo"
Äblix "./Äfoo"
)
func main() {
fmt.Printf("Äfoo.Äbar(33) returns %v\n", Äfoo.Äbar(33))
fmt.Printf("Äblix.Äbar(33) returns %v\n", Äblix.Äbar(33))
}

View File

@ -0,0 +1,17 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package Þfoo
var ÞbarV int = 101
func Þbar(x int) int {
defer func() { ÞbarV += 3 }()
return Þblix(x)
}
func Þblix(x int) int {
defer func() { ÞbarV += 9 }()
return ÞbarV + x
}

View File

@ -0,0 +1,17 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"./Þfoo"
Þblix "./Þfoo"
)
func main() {
fmt.Printf("Þfoo.Þbar(33) returns %v\n", Þfoo.Þbar(33))
fmt.Printf("Þblix.Þbar(33) returns %v\n", Þblix.Þbar(33))
}

View File

@ -36,7 +36,11 @@ func main() {
{"type assertion", "", func() { _ = x == x.(*int) }},
{"out of bounds", "", func() { _ = x == s[1] }},
{"nil pointer dereference #1", "", func() { _ = x == *p }},
{"nil pointer dereference #2", "nil pointer dereference", func() { _ = *l == r[0] }},
// TODO(mdempsky): Restore "nil pointer dereference" check. The Go
// spec doesn't mandate an order for panics (or even panic
// messages), but left-to-right is less confusing to users.
{"nil pointer dereference #2", "", func() { _ = *l == r[0] }},
{"nil pointer dereference #3", "", func() { _ = *l == any(r[0]) }},
}
for _, tc := range tests {
@ -44,16 +48,14 @@ func main() {
}
}
func testFuncShouldPanic(name, errStr string, f func()) {
func testFuncShouldPanic(name, want string, f func()) {
defer func() {
e := recover()
if e == nil {
log.Fatalf("%s: comparison did not panic\n", name)
}
if errStr != "" {
if !strings.Contains(e.(error).Error(), errStr) {
log.Fatalf("%s: wrong panic message\n", name)
}
if have := e.(error).Error(); !strings.Contains(have, want) {
log.Fatalf("%s: wrong panic message: have %q, want %q\n", name, have, want)
}
}()
f()

View File

@ -7,7 +7,7 @@ package b
import "./a"
func g() {
h := a.E() // ERROR "inlining call to a.E" "a.I\(a.T\(0\)\) does not escape"
h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape"
h.M() // ERROR "devirtualizing h.M to a.T"
// BAD: T(0) could be stack allocated.

View File

@ -0,0 +1,21 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package a
type I interface{}
type F func()
type s struct {
f F
}
func NewWithF(f F) *s {
return &s{f: f}
}
func NewWithFuncI(func() I) *s {
return &s{}
}

View File

@ -0,0 +1,17 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package b
import (
"./a"
)
type S struct{}
func (s *S) M1() a.I {
return a.NewWithF(s.M2)
}
func (s *S) M2() {}

View File

@ -0,0 +1,14 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
import (
"./a"
"./b"
)
func f() {
a.NewWithFuncI((&b.S{}).M1)
}

View File

@ -0,0 +1,7 @@
// compiledir
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ignored

View File

@ -41,7 +41,7 @@ func bufferNoEscape3(xs []string) string { // ERROR "xs does not escape$"
func bufferNoEscape4() []byte {
var b bytes.Buffer
b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m\]$" "inlining call to bytes.\(\*Buffer\).Grow$" "string\(.*\) escapes to heap"
b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m\]$" "inlining call to bytes.\(\*Buffer\).Grow$" `".+" escapes to heap`
useBuffer(&b)
return b.Bytes() // ERROR "inlining call to bytes.\(\*Buffer\).Bytes$"
}

View File

@ -107,18 +107,6 @@ func q(x int) int { // ERROR "can inline q"
return foo() // ERROR "inlining call to q.func1"
}
func r(z int) int {
foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape"
return x + z
}
bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2"
return x + func(y int) int { // ERROR "can inline r.func2.1" "can inline r.func3"
return 2*y + x*z
}(x) // ERROR "inlining call to r.func2.1"
}
return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "inlining call to r.func3"
}
func s0(x int) int { // ERROR "can inline s0"
foo := func() { // ERROR "can inline s0.func1" "func literal does not escape"
x = x + 1

test/inline_nounified.go
View File

@ -0,0 +1,21 @@
// errorcheckwithauto -0 -m -d=inlfuncswithclosures=1
//go:build !goexperiment.unified
// +build !goexperiment.unified
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package foo
func r(z int) int {
foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape"
return x + z
}
bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2"
return x + func(y int) int { // ERROR "can inline r.func2.1" "can inline r.func3"
return 2*y + x*z
}(x) // ERROR "inlining call to r.func2.1"
}
return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "inlining call to r.func3"
}

test/inline_unified.go
View File

@ -0,0 +1,21 @@
// errorcheckwithauto -0 -m -d=inlfuncswithclosures=1
//go:build goexperiment.unified
// +build goexperiment.unified
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package foo
func r(z int) int {
foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape"
return x + z
}
bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2"
return x + func(y int) int { // ERROR "can inline r.func2.1"
return 2*y + x*z
}(x) // ERROR "inlining call to r.func2.1"
}
return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "can inline r.func3" "inlining call to r.func3"
}

View File

@ -1,4 +1,5 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
//go:build (amd64 && goexperiment.regabiargs) || (arm64 && goexperiment.regabiargs)
// +build amd64,goexperiment.regabiargs arm64,goexperiment.regabiargs
// Copyright 2014 The Go Authors. All rights reserved.
@ -601,7 +602,7 @@ func f38(b bool) {
printnl()
case *fi38(2) = <-fc38(): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ string$"
printnl()
case *fi38(3), *fb38() = <-fc38(): // ERROR "stack object .autotmp_[0-9]+ string$" "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$"
case *fi38(3), *fb38() = <-fc38(): // ERROR "stack object .autotmp_[0-9]+ string$" "live at call to f[ibc]38:( .autotmp_[0-9]+)+$"
printnl()
}
printnl()

View File

@ -184,6 +184,7 @@ func f4(x *[10]int) {
func f5(m map[string]struct{}) bool {
// Existence-only map lookups should not generate a nil check
_, ok := m[""]
tmp1, tmp2 := m[""] // ERROR "removed nil check"
_, ok := tmp1, tmp2
return ok
}

View File

@ -1980,8 +1980,11 @@ var types2Failures32Bit = setOf(
)
var go118Failures = setOf(
"typeparam/nested.go", // 1.18 compiler doesn't support function-local types with generics
"typeparam/issue51521.go", // 1.18 compiler produces bad panic message and link error
"typeparam/nested.go", // 1.18 compiler doesn't support function-local types with generics
"typeparam/issue51521.go", // 1.18 compiler produces bad panic message and link error
"typeparam/mdempsky/16.go", // 1.18 compiler uses interface shape type in failed type assertions
"typeparam/mdempsky/17.go", // 1.18 compiler mishandles implicit conversions from range loops
"typeparam/mdempsky/18.go", // 1.18 compiler mishandles implicit conversions in select statements
)
// In all of these cases, the 1.17 compiler reports reasonable errors, but either the
@ -2009,18 +2012,10 @@ var _ = setOf(
)
var unifiedFailures = setOf(
"closure3.go", // unified IR numbers closures differently than -d=inlfuncswithclosures
"escape4.go", // unified IR can inline f5 and f6; test doesn't expect this
"inline.go", // unified IR reports function literal diagnostics on different lines than -d=inlfuncswithclosures
"linkname3.go", // unified IR is missing some linkname errors
"closure3.go", // unified IR numbers closures differently than -d=inlfuncswithclosures
"escape4.go", // unified IR can inline f5 and f6; test doesn't expect this
"fixedbugs/issue42284.go", // prints "T(0) does not escape", but test expects "a.I(a.T(0)) does not escape"
"fixedbugs/issue7921.go", // prints "… escapes to heap", but test expects "string(…) escapes to heap"
"typeparam/issue47631.go", // unified IR can handle local type declarations
"fixedbugs/issue42058a.go", // unified IR doesn't report channel element too large
"fixedbugs/issue42058b.go", // unified IR doesn't report channel element too large
"fixedbugs/issue49767.go", // unified IR doesn't report channel element too large
"fixedbugs/issue49814.go", // unified IR doesn't report array type too large
"typeparam/issue47631.go", // unified IR can handle local type declarations
)
func setOf(keys ...string) map[string]bool {

View File

@ -400,4 +400,18 @@ func main() {
case i > x:
os.Exit(1)
}
// Unified IR converts the tag and all case values to empty
// interface, when any of the case values aren't assignable to the
// tag value's type. Make sure that `case nil:` compares against the
// tag type's nil value (i.e., `(*int)(nil)`), not nil interface
// (i.e., `any(nil)`).
switch (*int)(nil) {
case nil:
// ok
case any(nil):
assert(false, "case any(nil) matched")
default:
assert(false, "default matched")
}
}

View File

@ -0,0 +1,34 @@
// run
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that type assertion panics mention the real interface type,
// not their shape type.
package main
import (
"fmt"
"runtime"
"strings"
)
func main() {
// The exact error message isn't important, but it should mention
// `main.T`, not `go.shape.int_0`.
if have := F[T](); !strings.Contains(have, "interface { T() main.T }") {
fmt.Printf("FAIL: unexpected panic message: %q\n", have)
}
}
type T int
func F[T any]() (res string) {
defer func() {
res = recover().(runtime.Error).Error()
}()
_ = interface{ T() T }(nil).(T)
return
}

View File

@ -0,0 +1,110 @@
// run
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that implicit conversions of derived types to interface type
// in range loops work correctly.
package main
import (
"fmt"
"reflect"
)
func main() {
test{"int", "V"}.match(RangeArrayAny[V]())
test{"int", "V"}.match(RangeArrayIface[V]())
test{"V"}.match(RangeChanAny[V]())
test{"V"}.match(RangeChanIface[V]())
test{"K", "V"}.match(RangeMapAny[K, V]())
test{"K", "V"}.match(RangeMapIface[K, V]())
test{"int", "V"}.match(RangeSliceAny[V]())
test{"int", "V"}.match(RangeSliceIface[V]())
}
type test []string
func (t test) match(args ...any) {
if len(t) != len(args) {
fmt.Printf("FAIL: want %v values, have %v\n", len(t), len(args))
return
}
for i, want := range t {
if have := reflect.TypeOf(args[i]).Name(); want != have {
fmt.Printf("FAIL: %v: want type %v, have %v\n", i, want, have)
}
}
}
type iface interface{ M() int }
type K int
type V int
func (K) M() int { return 0 }
func (V) M() int { return 0 }
func RangeArrayAny[V any]() (k, v any) {
for k, v = range [...]V{zero[V]()} {
}
return
}
func RangeArrayIface[V iface]() (k any, v iface) {
for k, v = range [...]V{zero[V]()} {
}
return
}
func RangeChanAny[V any]() (v any) {
for v = range chanOf(zero[V]()) {
}
return
}
func RangeChanIface[V iface]() (v iface) {
for v = range chanOf(zero[V]()) {
}
return
}
func RangeMapAny[K comparable, V any]() (k, v any) {
for k, v = range map[K]V{zero[K](): zero[V]()} {
}
return
}
func RangeMapIface[K interface {
iface
comparable
}, V iface]() (k, v iface) {
for k, v = range map[K]V{zero[K](): zero[V]()} {
}
return
}
func RangeSliceAny[V any]() (k, v any) {
for k, v = range []V{zero[V]()} {
}
return
}
func RangeSliceIface[V iface]() (k any, v iface) {
for k, v = range []V{zero[V]()} {
}
return
}
func chanOf[T any](elems ...T) chan T {
c := make(chan T, len(elems))
for _, elem := range elems {
c <- elem
}
close(c)
return c
}
func zero[T any]() (_ T) { return }

View File

@ -0,0 +1,26 @@
// run
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that implicit conversions to interface type in a select/case
// clause are compiled correctly.
package main
import "fmt"
func main() { f[int]() }
func f[T any]() {
ch := make(chan T)
close(ch)
var i, ok any
select {
case i, ok = <-ch:
}
fmt.Printf("%T %T\n", i, ok)
}

View File

@ -0,0 +1 @@
int bool