Mirror of https://github.com/golang/go (synced 2024-11-19 18:44:41 -07:00)
cmd/compile: add indexed export format
This CL introduces a new indexed data format for package export data.
This improves on the previous (sequential) binary format by allowing
the compiler to selectively (and lazily) load only the data that's
actually needed for compilation.

In large Go projects, the package export data can become very large
due to transitive type declaration dependencies and inline
function/method bodies. By lazily loading these declarations and
bodies as needed, we avoid wasting time and memory processing
unnecessary and/or redundant data.

In the benchmarks below, "old" is -iexport=false and "new" is
-iexport=true. The suffixes indicate the compiler concurrency (-c)
and inlining (-l) settings used for the build (using
-gcflags=all=-foo). Benchmarks were run on an HP Z620.

Juju is "go build -a github.com/juju/juju/cmd/...":

name           old real-time/op  new real-time/op  delta
Juju/c=1/l=0        44.0s ± 1%        38.7s ± 9%  -11.97%  (p=0.001 n=7+7)
Juju/c=1/l=4        53.7s ± 3%        45.3s ± 4%  -15.53%  (p=0.001 n=7+7)
Juju/c=4/l=0        39.7s ± 8%        32.0s ± 4%  -19.38%  (p=0.001 n=7+7)
Juju/c=4/l=4        46.3s ± 4%        38.0s ± 4%  -18.06%  (p=0.001 n=7+7)

name           old user-time/op  new user-time/op  delta
Juju/c=1/l=0         371s ± 1%         300s ± 0%  -19.07%  (p=0.001 n=7+6)
Juju/c=1/l=4         482s ± 0%         374s ± 1%  -22.37%  (p=0.001 n=7+7)
Juju/c=4/l=0         410s ± 1%         340s ± 1%  -17.19%  (p=0.001 n=7+7)
Juju/c=4/l=4         532s ± 1%         424s ± 1%  -20.26%  (p=0.001 n=7+7)

name           old sys-time/op   new sys-time/op   delta
Juju/c=1/l=0        33.4s ± 1%        28.4s ± 2%  -15.02%  (p=0.001 n=7+7)
Juju/c=1/l=4        40.7s ± 2%        32.8s ± 3%  -19.51%  (p=0.001 n=7+7)
Juju/c=4/l=0        39.8s ± 2%        34.4s ± 2%  -13.74%  (p=0.001 n=7+7)
Juju/c=4/l=4        48.4s ± 2%        40.4s ± 2%  -16.50%  (p=0.001 n=7+7)

Kubelet is "go build -a k8s.io/kubernetes/cmd/kubelet":

name              old real-time/op  new real-time/op  delta
Kubelet/c=1/l=0        42.0s ± 1%        34.8s ± 1%  -17.27%  (p=0.008 n=5+5)
Kubelet/c=1/l=4        55.4s ± 3%        45.4s ± 3%  -18.06%  (p=0.002 n=6+6)
Kubelet/c=4/l=0        37.4s ± 3%        29.9s ± 1%  -20.25%  (p=0.004 n=6+5)
Kubelet/c=4/l=4        48.1s ± 2%        39.0s ± 5%  -18.93%  (p=0.002 n=6+6)

name              old user-time/op  new user-time/op  delta
Kubelet/c=1/l=0         291s ± 1%         233s ± 1%  -19.96%  (p=0.002 n=6+6)
Kubelet/c=1/l=4         385s ± 1%         298s ± 1%  -22.51%  (p=0.002 n=6+6)
Kubelet/c=4/l=0         325s ± 0%         268s ± 1%  -17.48%  (p=0.004 n=5+6)
Kubelet/c=4/l=4         429s ± 1%         343s ± 1%  -20.08%  (p=0.002 n=6+6)

name              old sys-time/op   new sys-time/op   delta
Kubelet/c=1/l=0        25.1s ± 2%        20.9s ± 4%  -16.69%  (p=0.002 n=6+6)
Kubelet/c=1/l=4        31.2s ± 3%        24.4s ± 0%  -21.67%  (p=0.010 n=6+4)
Kubelet/c=4/l=0        30.2s ± 2%        25.6s ± 1%  -15.34%  (p=0.002 n=6+6)
Kubelet/c=4/l=4        37.3s ± 1%        30.9s ± 2%  -17.11%  (p=0.002 n=6+6)

Change-Id: Ie43eb3bbe1392cbb61c86792a17a57b33b9561f0
Reviewed-on: https://go-review.googlesource.com/106796
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
parent 03f546eb60
commit ca2f85fd3f
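Before the diff itself, a minimal sketch (not part of the CL) of the one-byte format dispatch this change introduces: after the "$$B" marker in an object file, indexed export data begins with an 'i' byte, while the old binary export format always starts with 'c', 'd', or 'v' (see the importfile and BImportData hunks below).

package main

import "fmt"

// isIndexedExportData reports whether the bytes that follow the
// "\n$$B\n" marker use the new indexed format.
func isIndexedExportData(data []byte) bool {
    return len(data) > 0 && data[0] == 'i'
}

func main() {
    fmt.Println(isIndexedExportData([]byte("i...")))        // true: indexed format
    fmt.Println(isIndexedExportData([]byte("version ...")))  // false: old format starts with 'c', 'd', or 'v'
}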
@@ -598,6 +598,7 @@ var knownFormats = map[string]string{
     "*cmd/internal/obj.LSym %v": "",
     "*math/big.Int %#x": "",
     "*math/big.Int %s": "",
+    "*math/big.Int %v": "",
     "[16]byte %x": "",
     "[]*cmd/compile/internal/gc.Node %v": "",
     "[]*cmd/compile/internal/ssa.Block %v": "",
@@ -612,6 +613,7 @@ var knownFormats = map[string]string{
     "bool %v": "",
     "byte %08b": "",
     "byte %c": "",
+    "byte %v": "",
     "cmd/compile/internal/arm.shift %d": "",
     "cmd/compile/internal/gc.Class %d": "",
     "cmd/compile/internal/gc.Class %s": "",
@@ -631,6 +633,7 @@ var knownFormats = map[string]string{
     "cmd/compile/internal/gc.Val %v": "",
     "cmd/compile/internal/gc.fmtMode %d": "",
     "cmd/compile/internal/gc.initKind %d": "",
+    "cmd/compile/internal/gc.itag %v": "",
     "cmd/compile/internal/ssa.BranchPrediction %d": "",
     "cmd/compile/internal/ssa.Edge %v": "",
     "cmd/compile/internal/ssa.GCNode %v": "",
@@ -442,6 +442,8 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
     xfunc.Func.SetDupok(true)
     xfunc.Func.SetNeedctxt(true)
 
+    tfn.Type.SetPkg(t0.Pkg())
+
     // Declare and initialize variable holding receiver.
 
     cv := nod(OCLOSUREVAR, nil, nil)
@@ -5,8 +5,6 @@
 package gc
 
 import (
-    "bufio"
-    "bytes"
     "cmd/compile/internal/types"
     "cmd/internal/bio"
     "cmd/internal/src"
@@ -14,6 +12,8 @@ import (
 )
 
 var (
+    flagiexport bool // if set, use indexed export data format
+
     Debug_export int // if set, print debugging information about export data
 )
 
@@ -72,32 +72,15 @@ func (x methodbyname) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
 func (x methodbyname) Less(i, j int) bool { return x[i].Sym.Name < x[j].Sym.Name }
 
 func dumpexport(bout *bio.Writer) {
-    size := 0 // size of export section without enclosing markers
     // The linker also looks for the $$ marker - use char after $$ to distinguish format.
     exportf(bout, "\n$$B\n") // indicate binary export format
-    if debugFormat {
-        // save a copy of the export data
-        var copy bytes.Buffer
-        bcopy := bufio.NewWriter(&copy)
-        size = export(bcopy, Debug_export != 0)
-        bcopy.Flush() // flushing to bytes.Buffer cannot fail
-        if n, err := bout.Write(copy.Bytes()); n != size || err != nil {
-            Fatalf("error writing export data: got %d bytes, want %d bytes, err = %v", n, size, err)
-        }
-        // export data must contain no '$' so that we can find the end by searching for "$$"
-        // TODO(gri) is this still needed?
-        if bytes.IndexByte(copy.Bytes(), '$') >= 0 {
-            Fatalf("export data contains $")
-        }
-
-        // verify that we can read the copied export data back in
-        // (use empty package map to avoid collisions)
-        types.CleanroomDo(func() {
-            Import(types.NewPkg("", ""), bufio.NewReader(&copy)) // must not die
-        })
+    off := bout.Offset()
+    if flagiexport {
+        iexport(bout.Writer)
     } else {
-        size = export(bout.Writer, Debug_export != 0)
+        export(bout.Writer, Debug_export != 0)
     }
+    size := bout.Offset() - off
     exportf(bout, "\n$$\n")
 
     if Debug_export != 0 {
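A small illustration (not part of the CL, using bytes.Buffer in place of the compiler's bio.Writer) of how the rewritten dumpexport measures the export section: it records the writer offset before emitting the payload and subtracts it afterwards, instead of buffering and copying the data.

package main

import (
    "bytes"
    "fmt"
)

func main() {
    var out bytes.Buffer
    fmt.Fprintf(&out, "\n$$B\n") // marker: binary export data follows

    off := out.Len()                  // offset before the payload
    out.WriteString("i...payload...") // stand-in for iexport/export output
    size := out.Len() - off           // size of export section without markers

    fmt.Fprintf(&out, "\n$$\n") // closing marker
    fmt.Println("export section size:", size)
}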
@@ -108,6 +91,14 @@ func dumpexport(bout *bio.Writer) {
 func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op) *Node {
     n := asNode(s.Def)
     if n == nil {
+        // iimport should have created a stub ONONAME
+        // declaration for all imported symbols. The exception
+        // is declarations for Runtimepkg, which are populated
+        // by loadsys instead.
+        if flagiexport && s.Pkg != Runtimepkg {
+            Fatalf("missing ONONAME for %v\n", s)
+        }
+
         n = dclname(s)
         s.Def = asTypesNode(n)
         s.Importdef = ipkg
src/cmd/compile/internal/gc/iexport.go: new file, 1414 lines (diff suppressed because it is too large)
src/cmd/compile/internal/gc/iimport.go: new file, 1066 lines (diff suppressed because it is too large)
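The suppressed files define the indexed format itself. Its top-level layout can be inferred from the go/internal/gcimporter reader included later in this diff; the sketch below (not part of the CL) writes and reads a toy header under that assumption: a uvarint format version, then the string-section and declaration-section lengths, then the two sections, with index entries pointing into them so declarations can be decoded lazily.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func putUvarint(w *bytes.Buffer, x uint64) {
    var tmp [binary.MaxVarintLen64]byte
    w.Write(tmp[:binary.PutUvarint(tmp[:], x)])
}

func main() {
    stringData := []byte("...string section...")
    declData := []byte("...declaration section...")

    var buf bytes.Buffer
    putUvarint(&buf, 0)                       // format version 0
    putUvarint(&buf, uint64(len(stringData))) // string section length
    putUvarint(&buf, uint64(len(declData)))   // declaration section length
    buf.Write(stringData)
    buf.Write(declData)

    // Reading the header back, the way iImportData does.
    r := bytes.NewReader(buf.Bytes())
    version, _ := binary.ReadUvarint(r)
    sLen, _ := binary.ReadUvarint(r)
    dLen, _ := binary.ReadUvarint(r)
    fmt.Println(version, sLen, dLen)
}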
@@ -59,6 +59,10 @@ func fnpkg(fn *Node) *types.Pkg {
 func typecheckinl(fn *Node) {
     lno := setlineno(fn)
 
+    if flagiexport {
+        expandInline(fn)
+    }
+
     // typecheckinl is only for imported functions;
     // their bodies may refer to unsafe as long as the package
     // was marked safe during import (which was checked then).
@@ -244,6 +244,7 @@ func Main(archInit func(*Arch)) {
     flag.StringVar(&blockprofile, "blockprofile", "", "write block profile to `file`")
     flag.StringVar(&mutexprofile, "mutexprofile", "", "write mutex profile to `file`")
     flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
+    flag.BoolVar(&flagiexport, "iexport", false, "export indexed package data")
     objabi.Flagparse(usage)
 
     // Record flags that affect the build result. (And don't
@@ -1107,7 +1108,20 @@ func importfile(f *Val) *types.Pkg {
             fmt.Printf("importing %s (%s)\n", path_, file)
         }
         imp.ReadByte() // skip \n after $$B
+
+        c, err = imp.ReadByte()
+        if err != nil {
+            yyerror("import %s: reading input: %v", file, err)
+            errorexit()
+        }
+
+        if c == 'i' {
+            iimport(importpkg, imp)
+        } else {
+            // Old export format always starts with 'c', 'd', or 'v'.
+            imp.UnreadByte()
             Import(importpkg, imp.Reader)
+        }
 
     default:
         yyerror("no import in %q", path_)
src/cmd/compile/internal/gc/mapfile_mmap.go (new file, 48 lines):

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd

package gc

import (
    "os"
    "reflect"
    "syscall"
    "unsafe"
)

// TODO(mdempsky): Is there a higher-level abstraction that still
// works well for iimport?

// mapFile returns length bytes from the file starting at the
// specified offset as a string.
func mapFile(f *os.File, offset, length int64) (string, error) {
    // POSIX mmap: "The implementation may require that off is a
    // multiple of the page size."
    x := offset & int64(os.Getpagesize()-1)
    offset -= x
    length += x

    buf, err := syscall.Mmap(int(f.Fd()), offset, int(length), syscall.PROT_READ, syscall.MAP_SHARED)
    keepAlive(f)
    if err != nil {
        return "", err
    }

    buf = buf[x:]
    pSlice := (*reflect.SliceHeader)(unsafe.Pointer(&buf))

    var res string
    pString := (*reflect.StringHeader)(unsafe.Pointer(&res))

    pString.Data = pSlice.Data
    pString.Len = pSlice.Len

    return res, nil
}

// keepAlive is a reimplementation of runtime.KeepAlive, which wasn't
// added until Go 1.7, whereas we need to compile with Go 1.4.
var keepAlive = func(interface{}) {}
src/cmd/compile/internal/gc/mapfile_read.go (new file, 21 lines):

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd

package gc

import (
    "io"
    "os"
)

func mapFile(f *os.File, offset, length int64) (string, error) {
    buf := make([]byte, length)
    _, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
    if err != nil {
        return "", err
    }
    return string(buf), nil
}
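A hypothetical, standalone usage sketch of the portable fallback above: pulling one declaration's bytes out of an export-data file by offset and length. The file name and offsets are made up for illustration.

package main

import (
    "fmt"
    "io"
    "os"
)

// readSection mirrors the fallback mapFile above: read exactly length
// bytes starting at offset, without mmap.
func readSection(f *os.File, offset, length int64) (string, error) {
    buf := make([]byte, length)
    if _, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf); err != nil {
        return "", err
    }
    return string(buf), nil
}

func main() {
    f, err := os.Open("pkg.a") // hypothetical package archive
    if err != nil {
        fmt.Println(err)
        return
    }
    defer f.Close()

    // Hypothetical offset/length: fetch just one declaration's bytes lazily.
    decl, err := readSection(f, 1024, 64)
    fmt.Println(len(decl), err)
}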
@@ -67,6 +67,17 @@ func parseFiles(filenames []string) uint {
 
     localpkg.Height = myheight
 
+    if flagiexport {
+        // init.go requires all imported init functions to be
+        // fully resolved.
+        // TODO(mdempsky): Can this be done elsewhere more cleanly?
+        for _, s := range types.InitSyms {
+            if n := asNode(s.Def); n != nil && s.Pkg != localpkg {
+                resolve(n)
+            }
+        }
+    }
+
     return lines
 }
 
@@ -1588,6 +1588,7 @@ func structargs(tl *types.Type, mustname bool) []*Node {
             gen++
         }
         a := symfield(s, t.Type)
+        a.Pos = t.Pos
         a.SetIsddd(t.Isddd())
         args = append(args, a)
     }
@@ -1705,7 +1706,13 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
     Curfn = fn
     typecheckslice(fn.Nbody.Slice(), Etop)
 
-    inlcalls(fn)
+    // TODO(mdempsky): Investigate why this doesn't work with
+    // indexed export. For now, we disable even in non-indexed
+    // mode to ensure fair benchmark comparisons and to track down
+    // unintended compilation differences.
+    if false {
+        inlcalls(fn)
+    }
     escAnalyze([]*Node{fn}, false)
 
     Curfn = nil
@@ -32,23 +32,32 @@ var typecheckdefstack []*Node
 
 // resolve ONONAME to definition, if any.
 func resolve(n *Node) *Node {
-    if n != nil && n.Op == ONONAME && n.Sym != nil {
-        r := asNode(n.Sym.Def)
-        if r != nil {
-            if r.Op != OIOTA {
-                n = r
-            } else if len(typecheckdefstack) > 0 {
-                x := typecheckdefstack[len(typecheckdefstack)-1]
-                if x.Op == OLITERAL {
-                    n = nodintconst(x.Iota())
-                }
-            }
-        }
+    if n == nil || n.Op != ONONAME {
+        return n
     }
 
+    if n.Sym.Pkg != localpkg {
+        expandDecl(n)
     return n
 }
 
+    r := asNode(n.Sym.Def)
+    if r == nil {
+        return n
+    }
+
+    if r.Op == OIOTA {
+        if i := len(typecheckdefstack); i > 0 {
+            if x := typecheckdefstack[i-1]; x.Op == OLITERAL {
+                return nodintconst(x.Iota())
+            }
+        }
+        return n
+    }
+
+    return r
+}
+
 func typecheckslice(l []*Node, top int) {
     for i := range l {
         l[i] = typecheck(l[i], top)
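Conceptually, the rewritten resolve defers to expandDecl for symbols from other packages, so imported declarations are decoded only when they are first referenced. A toy sketch of that lazy pattern follows, using made-up types rather than the compiler's own:

package main

import "fmt"

// sym is a toy stand-in for an imported symbol stub (ONONAME).
type sym struct {
    name   string
    def    string        // decoded declaration; empty until expanded
    expand func() string // decodes the declaration from export data
}

// resolve returns the symbol's definition, decoding it on first use.
func resolve(s *sym) string {
    if s.def == "" && s.expand != nil {
        s.def = s.expand()
    }
    return s.def
}

func main() {
    decodes := 0
    s := &sym{
        name: "big.Int",
        expand: func() string {
            decodes++
            return "type Int struct{ ... }"
        },
    }

    fmt.Println(resolve(s))               // decodes now
    fmt.Println(resolve(s))               // cached; not decoded again
    fmt.Println("decode calls:", decodes) // 1
}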
@@ -77,3 +77,18 @@ func IsDclstackValid() bool {
     }
     return true
 }
+
+// PkgDef returns the definition associated with s at package scope.
+func (s *Sym) PkgDef() *Node {
+    // Look for outermost saved declaration, which must be the
+    // package scope definition, if present.
+    for _, d := range dclstack {
+        if s == d.sym {
+            return d.def
+        }
+    }
+
+    // Otherwise, the declaration hasn't been shadowed within a
+    // function scope.
+    return s.Def
+}
@@ -97,3 +97,11 @@ func (w *Writer) Close() error {
     }
     return err
 }
+
+func (r *Reader) File() *os.File {
+    return r.f
+}
+
+func (w *Writer) File() *os.File {
+    return w.f
+}
@@ -59,6 +59,10 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []
         }
     }()
 
+    if len(data) > 0 && data[0] == 'i' {
+        return iImportData(fset, imports, data[1:], path)
+    }
+
     p := importer{
         imports: imports,
         data:    data,
583
src/go/internal/gcimporter/iimport.go
Normal file
583
src/go/internal/gcimporter/iimport.go
Normal file
@ -0,0 +1,583 @@
|
|||||||
|
// Copyright 2018 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Indexed package import.
|
||||||
|
// See cmd/compile/internal/gc/iexport.go for the export data format.
|
||||||
|
|
||||||
|
package gcimporter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"go/constant"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"io"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
type intReader struct {
|
||||||
|
*bytes.Reader
|
||||||
|
path string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *intReader) int64() int64 {
|
||||||
|
i, err := binary.ReadVarint(r.Reader)
|
||||||
|
if err != nil {
|
||||||
|
errorf("import %q: read varint error: %v", r.path, err)
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *intReader) uint64() uint64 {
|
||||||
|
i, err := binary.ReadUvarint(r.Reader)
|
||||||
|
if err != nil {
|
||||||
|
errorf("import %q: read varint error: %v", r.path, err)
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
const predeclReserved = 32
|
||||||
|
|
||||||
|
type itag uint64
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Types
|
||||||
|
definedType itag = iota
|
||||||
|
pointerType
|
||||||
|
sliceType
|
||||||
|
arrayType
|
||||||
|
chanType
|
||||||
|
mapType
|
||||||
|
signatureType
|
||||||
|
structType
|
||||||
|
interfaceType
|
||||||
|
)
|
||||||
|
|
||||||
|
// iImportData imports a package from the serialized package data
|
||||||
|
// and returns the number of bytes consumed and a reference to the package.
|
||||||
|
// If the export data version is not recognized or the format is otherwise
|
||||||
|
// compromised, an error is returned.
|
||||||
|
func iImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
||||||
|
r := &intReader{bytes.NewReader(data), path}
|
||||||
|
|
||||||
|
version := r.uint64()
|
||||||
|
switch version {
|
||||||
|
case 0:
|
||||||
|
default:
|
||||||
|
errorf("cannot import %q: unknown iexport format version %d", path, version)
|
||||||
|
}
|
||||||
|
|
||||||
|
sLen := int64(r.uint64())
|
||||||
|
dLen := int64(r.uint64())
|
||||||
|
|
||||||
|
whence, _ := r.Seek(0, io.SeekCurrent)
|
||||||
|
stringData := data[whence : whence+sLen]
|
||||||
|
declData := data[whence+sLen : whence+sLen+dLen]
|
||||||
|
r.Seek(sLen+dLen, io.SeekCurrent)
|
||||||
|
|
||||||
|
p := iimporter{
|
||||||
|
ipath: path,
|
||||||
|
|
||||||
|
stringData: stringData,
|
||||||
|
stringCache: make(map[uint64]string),
|
||||||
|
pkgCache: make(map[uint64]*types.Package),
|
||||||
|
|
||||||
|
declData: declData,
|
||||||
|
pkgIndex: make(map[*types.Package]map[string]uint64),
|
||||||
|
typCache: make(map[uint64]types.Type),
|
||||||
|
|
||||||
|
fake: fakeFileSet{
|
||||||
|
fset: fset,
|
||||||
|
files: make(map[string]*token.File),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, pt := range predeclared {
|
||||||
|
p.typCache[uint64(i)] = pt
|
||||||
|
}
|
||||||
|
|
||||||
|
pkgList := make([]*types.Package, r.uint64())
|
||||||
|
for i := range pkgList {
|
||||||
|
pkgPathOff := r.uint64()
|
||||||
|
pkgPath := p.stringAt(pkgPathOff)
|
||||||
|
pkgName := p.stringAt(r.uint64())
|
||||||
|
_ = r.uint64() // package height; unused by go/types
|
||||||
|
|
||||||
|
if pkgPath == "" {
|
||||||
|
pkgPath = path
|
||||||
|
}
|
||||||
|
pkg := imports[pkgPath]
|
||||||
|
if pkg == nil {
|
||||||
|
pkg = types.NewPackage(pkgPath, pkgName)
|
||||||
|
imports[pkgPath] = pkg
|
||||||
|
} else if pkg.Name() != pkgName {
|
||||||
|
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.pkgCache[pkgPathOff] = pkg
|
||||||
|
|
||||||
|
nameIndex := make(map[string]uint64)
|
||||||
|
for nSyms := r.uint64(); nSyms > 0; nSyms-- {
|
||||||
|
name := p.stringAt(r.uint64())
|
||||||
|
nameIndex[name] = r.uint64()
|
||||||
|
}
|
||||||
|
|
||||||
|
p.pkgIndex[pkg] = nameIndex
|
||||||
|
pkgList[i] = pkg
|
||||||
|
}
|
||||||
|
|
||||||
|
localpkg := pkgList[0]
|
||||||
|
|
||||||
|
names := make([]string, 0, len(p.pkgIndex[localpkg]))
|
||||||
|
for name := range p.pkgIndex[localpkg] {
|
||||||
|
names = append(names, name)
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
for _, name := range names {
|
||||||
|
p.doDecl(localpkg, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, typ := range p.interfaceList {
|
||||||
|
typ.Complete()
|
||||||
|
}
|
||||||
|
|
||||||
|
// record all referenced packages as imports
|
||||||
|
list := append(([]*types.Package)(nil), pkgList[1:]...)
|
||||||
|
sort.Sort(byPath(list))
|
||||||
|
localpkg.SetImports(list)
|
||||||
|
|
||||||
|
// package was imported completely and without errors
|
||||||
|
localpkg.MarkComplete()
|
||||||
|
|
||||||
|
consumed, _ := r.Seek(0, io.SeekCurrent)
|
||||||
|
return int(consumed), localpkg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type iimporter struct {
|
||||||
|
ipath string
|
||||||
|
|
||||||
|
stringData []byte
|
||||||
|
stringCache map[uint64]string
|
||||||
|
pkgCache map[uint64]*types.Package
|
||||||
|
|
||||||
|
declData []byte
|
||||||
|
pkgIndex map[*types.Package]map[string]uint64
|
||||||
|
typCache map[uint64]types.Type
|
||||||
|
|
||||||
|
fake fakeFileSet
|
||||||
|
interfaceList []*types.Interface
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iimporter) doDecl(pkg *types.Package, name string) {
|
||||||
|
// See if we've already imported this declaration.
|
||||||
|
if obj := pkg.Scope().Lookup(name); obj != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
off, ok := p.pkgIndex[pkg][name]
|
||||||
|
if !ok {
|
||||||
|
errorf("%v.%v not in index", pkg, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &importReader{p: p, currPkg: pkg}
|
||||||
|
r.declReader.Reset(p.declData[off:])
|
||||||
|
|
||||||
|
r.obj(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iimporter) stringAt(off uint64) string {
|
||||||
|
if s, ok := p.stringCache[off]; ok {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
slen, n := binary.Uvarint(p.stringData[off:])
|
||||||
|
if n <= 0 {
|
||||||
|
errorf("varint failed")
|
||||||
|
}
|
||||||
|
spos := off + uint64(n)
|
||||||
|
s := string(p.stringData[spos : spos+slen])
|
||||||
|
p.stringCache[off] = s
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iimporter) pkgAt(off uint64) *types.Package {
|
||||||
|
if pkg, ok := p.pkgCache[off]; ok {
|
||||||
|
return pkg
|
||||||
|
}
|
||||||
|
path := p.stringAt(off)
|
||||||
|
errorf("missing package %q in %q", path, p.ipath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
|
||||||
|
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
if off < predeclReserved {
|
||||||
|
errorf("predeclared type missing from cache: %v", off)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &importReader{p: p}
|
||||||
|
r.declReader.Reset(p.declData[off-predeclReserved:])
|
||||||
|
t := r.doType(base)
|
||||||
|
|
||||||
|
if base == nil || !isInterface(t) {
|
||||||
|
p.typCache[off] = t
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
type importReader struct {
|
||||||
|
p *iimporter
|
||||||
|
declReader bytes.Reader
|
||||||
|
currPkg *types.Package
|
||||||
|
prevFile string
|
||||||
|
prevLine int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) obj(name string) {
|
||||||
|
tag := r.byte()
|
||||||
|
pos := r.pos()
|
||||||
|
|
||||||
|
switch tag {
|
||||||
|
case 'A':
|
||||||
|
typ := r.typ()
|
||||||
|
|
||||||
|
r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
|
||||||
|
|
||||||
|
case 'C':
|
||||||
|
typ, val := r.value()
|
||||||
|
|
||||||
|
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
|
||||||
|
|
||||||
|
case 'F':
|
||||||
|
sig := r.signature(nil)
|
||||||
|
|
||||||
|
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
|
||||||
|
|
||||||
|
case 'T':
|
||||||
|
// Types can be recursive. We need to setup a stub
|
||||||
|
// declaration before recursing.
|
||||||
|
obj := types.NewTypeName(pos, r.currPkg, name, nil)
|
||||||
|
named := types.NewNamed(obj, nil, nil)
|
||||||
|
r.declare(obj)
|
||||||
|
|
||||||
|
underlying := r.p.typAt(r.uint64(), named).Underlying()
|
||||||
|
named.SetUnderlying(underlying)
|
||||||
|
|
||||||
|
if !isInterface(underlying) {
|
||||||
|
for n := r.uint64(); n > 0; n-- {
|
||||||
|
mpos := r.pos()
|
||||||
|
mname := r.ident()
|
||||||
|
recv := r.param()
|
||||||
|
msig := r.signature(recv)
|
||||||
|
|
||||||
|
named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'V':
|
||||||
|
typ := r.typ()
|
||||||
|
|
||||||
|
r.declare(types.NewVar(pos, r.currPkg, name, typ))
|
||||||
|
|
||||||
|
default:
|
||||||
|
errorf("unexpected tag: %v", tag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) declare(obj types.Object) {
|
||||||
|
obj.Pkg().Scope().Insert(obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) value() (typ types.Type, val constant.Value) {
|
||||||
|
typ = r.typ()
|
||||||
|
|
||||||
|
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
|
||||||
|
case types.IsBoolean:
|
||||||
|
val = constant.MakeBool(r.bool())
|
||||||
|
|
||||||
|
case types.IsString:
|
||||||
|
val = constant.MakeString(r.string())
|
||||||
|
|
||||||
|
case types.IsInteger:
|
||||||
|
val = r.mpint(b)
|
||||||
|
|
||||||
|
case types.IsFloat:
|
||||||
|
val = r.mpfloat(b)
|
||||||
|
|
||||||
|
case types.IsComplex:
|
||||||
|
re := r.mpfloat(b)
|
||||||
|
im := r.mpfloat(b)
|
||||||
|
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
|
||||||
|
|
||||||
|
default:
|
||||||
|
errorf("unexpected type %v", typ) // panics
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func intSize(b *types.Basic) (signed bool, maxBytes uint) {
|
||||||
|
if (b.Info() & types.IsUntyped) != 0 {
|
||||||
|
return true, 64
|
||||||
|
}
|
||||||
|
|
||||||
|
switch b.Kind() {
|
||||||
|
case types.Float32, types.Complex64:
|
||||||
|
return true, 3
|
||||||
|
case types.Float64, types.Complex128:
|
||||||
|
return true, 7
|
||||||
|
}
|
||||||
|
|
||||||
|
signed = (b.Info() & types.IsUnsigned) == 0
|
||||||
|
switch b.Kind() {
|
||||||
|
case types.Int8, types.Uint8:
|
||||||
|
maxBytes = 1
|
||||||
|
case types.Int16, types.Uint16:
|
||||||
|
maxBytes = 2
|
||||||
|
case types.Int32, types.Uint32:
|
||||||
|
maxBytes = 4
|
||||||
|
default:
|
||||||
|
maxBytes = 8
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) mpint(b *types.Basic) constant.Value {
|
||||||
|
signed, maxBytes := intSize(b)
|
||||||
|
|
||||||
|
maxSmall := 256 - maxBytes
|
||||||
|
if signed {
|
||||||
|
maxSmall = 256 - 2*maxBytes
|
||||||
|
}
|
||||||
|
if maxBytes == 1 {
|
||||||
|
maxSmall = 256
|
||||||
|
}
|
||||||
|
|
||||||
|
n, _ := r.declReader.ReadByte()
|
||||||
|
if uint(n) < maxSmall {
|
||||||
|
v := int64(n)
|
||||||
|
if signed {
|
||||||
|
v >>= 1
|
||||||
|
if n&1 != 0 {
|
||||||
|
v = ^v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return constant.MakeInt64(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
v := -n
|
||||||
|
if signed {
|
||||||
|
v = -(n &^ 1) >> 1
|
||||||
|
}
|
||||||
|
if v < 1 || uint(v) > maxBytes {
|
||||||
|
errorf("weird decoding: %v, %v => %v", n, signed, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, v)
|
||||||
|
io.ReadFull(&r.declReader, buf)
|
||||||
|
|
||||||
|
// convert to little endian
|
||||||
|
// TODO(gri) go/constant should have a more direct conversion function
|
||||||
|
// (e.g., once it supports a big.Float based implementation)
|
||||||
|
for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
|
||||||
|
buf[i], buf[j] = buf[j], buf[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
x := constant.MakeFromBytes(buf)
|
||||||
|
if signed && n&1 != 0 {
|
||||||
|
x = constant.UnaryOp(token.SUB, x, 0)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) mpfloat(b *types.Basic) constant.Value {
|
||||||
|
x := r.mpint(b)
|
||||||
|
if constant.Sign(x) == 0 {
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
exp := r.int64()
|
||||||
|
switch {
|
||||||
|
case exp > 0:
|
||||||
|
x = constant.Shift(x, token.SHL, uint(exp))
|
||||||
|
case exp < 0:
|
||||||
|
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
|
||||||
|
x = constant.BinaryOp(x, token.QUO, d)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) ident() string {
|
||||||
|
return r.string()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) qualifiedIdent() (*types.Package, string) {
|
||||||
|
name := r.string()
|
||||||
|
pkg := r.pkg()
|
||||||
|
return pkg, name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) pos() token.Pos {
|
||||||
|
delta := r.int64()
|
||||||
|
if delta != deltaNewFile {
|
||||||
|
r.prevLine += delta
|
||||||
|
} else if l := r.int64(); l == -1 {
|
||||||
|
r.prevLine += deltaNewFile
|
||||||
|
} else {
|
||||||
|
r.prevFile = r.string()
|
||||||
|
r.prevLine = l
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.prevFile == "" && r.prevLine == 0 {
|
||||||
|
return token.NoPos
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.p.fake.pos(r.prevFile, int(r.prevLine))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) typ() types.Type {
|
||||||
|
return r.p.typAt(r.uint64(), nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isInterface(t types.Type) bool {
|
||||||
|
_, ok := t.(*types.Interface)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
|
||||||
|
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
|
||||||
|
|
||||||
|
func (r *importReader) doType(base *types.Named) types.Type {
|
||||||
|
switch k := r.kind(); k {
|
||||||
|
default:
|
||||||
|
errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case definedType:
|
||||||
|
pkg, name := r.qualifiedIdent()
|
||||||
|
r.p.doDecl(pkg, name)
|
||||||
|
return pkg.Scope().Lookup(name).(*types.TypeName).Type()
|
||||||
|
case pointerType:
|
||||||
|
return types.NewPointer(r.typ())
|
||||||
|
case sliceType:
|
||||||
|
return types.NewSlice(r.typ())
|
||||||
|
case arrayType:
|
||||||
|
n := r.uint64()
|
||||||
|
return types.NewArray(r.typ(), int64(n))
|
||||||
|
case chanType:
|
||||||
|
dir := chanDir(int(r.uint64()))
|
||||||
|
return types.NewChan(dir, r.typ())
|
||||||
|
case mapType:
|
||||||
|
return types.NewMap(r.typ(), r.typ())
|
||||||
|
case signatureType:
|
||||||
|
r.currPkg = r.pkg()
|
||||||
|
return r.signature(nil)
|
||||||
|
|
||||||
|
case structType:
|
||||||
|
r.currPkg = r.pkg()
|
||||||
|
|
||||||
|
fields := make([]*types.Var, r.uint64())
|
||||||
|
tags := make([]string, len(fields))
|
||||||
|
for i := range fields {
|
||||||
|
fpos := r.pos()
|
||||||
|
fname := r.ident()
|
||||||
|
ftyp := r.typ()
|
||||||
|
emb := r.bool()
|
||||||
|
tag := r.string()
|
||||||
|
|
||||||
|
fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
|
||||||
|
tags[i] = tag
|
||||||
|
}
|
||||||
|
return types.NewStruct(fields, tags)
|
||||||
|
|
||||||
|
case interfaceType:
|
||||||
|
r.currPkg = r.pkg()
|
||||||
|
|
||||||
|
embeddeds := make([]*types.Named, r.uint64())
|
||||||
|
for i := range embeddeds {
|
||||||
|
_ = r.pos()
|
||||||
|
embeddeds[i] = r.typ().(*types.Named)
|
||||||
|
}
|
||||||
|
|
||||||
|
methods := make([]*types.Func, r.uint64())
|
||||||
|
for i := range methods {
|
||||||
|
mpos := r.pos()
|
||||||
|
mname := r.ident()
|
||||||
|
|
||||||
|
// TODO(mdempsky): Matches bimport.go, but I
|
||||||
|
// don't agree with this.
|
||||||
|
var recv *types.Var
|
||||||
|
if base != nil {
|
||||||
|
recv = types.NewVar(token.NoPos, r.currPkg, "", base)
|
||||||
|
}
|
||||||
|
|
||||||
|
msig := r.signature(recv)
|
||||||
|
methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
|
||||||
|
}
|
||||||
|
|
||||||
|
typ := types.NewInterface(methods, embeddeds)
|
||||||
|
r.p.interfaceList = append(r.p.interfaceList, typ)
|
||||||
|
return typ
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) kind() itag {
|
||||||
|
return itag(r.uint64())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) signature(recv *types.Var) *types.Signature {
|
||||||
|
params := r.paramList()
|
||||||
|
results := r.paramList()
|
||||||
|
variadic := params.Len() > 0 && r.bool()
|
||||||
|
return types.NewSignature(recv, params, results, variadic)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) paramList() *types.Tuple {
|
||||||
|
xs := make([]*types.Var, r.uint64())
|
||||||
|
for i := range xs {
|
||||||
|
xs[i] = r.param()
|
||||||
|
}
|
||||||
|
return types.NewTuple(xs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) param() *types.Var {
|
||||||
|
pos := r.pos()
|
||||||
|
name := r.ident()
|
||||||
|
typ := r.typ()
|
||||||
|
return types.NewParam(pos, r.currPkg, name, typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) bool() bool {
|
||||||
|
return r.uint64() != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) int64() int64 {
|
||||||
|
n, err := binary.ReadVarint(&r.declReader)
|
||||||
|
if err != nil {
|
||||||
|
errorf("readVarint: %v", err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) uint64() uint64 {
|
||||||
|
n, err := binary.ReadUvarint(&r.declReader)
|
||||||
|
if err != nil {
|
||||||
|
errorf("readUvarint: %v", err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *importReader) byte() byte {
|
||||||
|
x, err := r.declReader.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
errorf("declReader.ReadByte: %v", err)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
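A minimal sketch (not part of the CL) of the string-table encoding that stringAt above assumes: each string is stored as a uvarint length followed by its bytes and is referenced elsewhere by its byte offset into the string section, so strings are decoded and cached only when needed.

package main

import (
    "encoding/binary"
    "fmt"
)

// appendString adds s to the string section and returns its offset.
func appendString(section []byte, s string) (data []byte, off uint64) {
    off = uint64(len(section))
    var tmp [binary.MaxVarintLen64]byte
    n := binary.PutUvarint(tmp[:], uint64(len(s)))
    return append(append(section, tmp[:n]...), s...), off
}

// stringAt decodes the string stored at the given offset.
func stringAt(section []byte, off uint64) string {
    slen, n := binary.Uvarint(section[off:])
    if n <= 0 {
        panic("bad uvarint")
    }
    spos := off + uint64(n)
    return string(section[spos : spos+slen])
}

func main() {
    var section []byte
    section, off1 := appendString(section, "math/big")
    section, off2 := appendString(section, "Int")
    fmt.Println(stringAt(section, off1), stringAt(section, off2)) // math/big Int
}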