package imports

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/tools/internal/gopathwalk"
)

// To find packages to import, the resolver needs to know about all of the
// packages that could be imported. This includes packages in (1) the current
// module, (2) replace targets, and (3) the module cache. Packages in (1) and
// (2) may change over time, as the client may edit the current module and
// locally replaced modules. The module cache (which includes all of the
// packages in (3)) can only ever be added to.
//
// The resolver can thus save state about packages in the module cache
// and guarantee that this will not change over time. To obtain information
// about new modules added to the module cache, the module cache should be
// rescanned.
//
// It is OK to serve information about modules that have been deleted,
// as they do still exist.
// TODO(suzmue): can we share information with the caller about
// what module needs to be downloaded to import this package?

// directoryPackageStatus indicates how much of a directoryPackageInfo has
// been filled in. Later values imply that all earlier stages have been
// reached.
type directoryPackageStatus int

const (
	_ directoryPackageStatus = iota
	directoryScanned
	nameLoaded
	exportsLoaded
)
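
// Because the statuses are ordered, callers can gate work on a minimum stage
// with a plain comparison. An illustrative sketch only; real callers go
// through reachedStatus below, which also folds in any recorded error:
//
//	if info.status >= nameLoaded {
//		// info.packageName has been filled in and is safe to read.
//	}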

type directoryPackageInfo struct {
	// status indicates the extent to which this struct has been filled in.
	status directoryPackageStatus
	// err is non-nil when there was an error trying to reach status.
	err error

	// Set when status >= directoryScanned.

	// dir is the absolute directory of this package.
	dir      string
	rootType gopathwalk.RootType
	// nonCanonicalImportPath is the package's expected import path. It may
	// not actually be importable at that path.
	nonCanonicalImportPath string

	// Module-related information.
	moduleDir  string // The directory that is the module root of this dir.
	moduleName string // The module name that contains this dir.

	// Set when status >= nameLoaded.

	packageName string // the package name, as declared in the source.

	// Set when status >= exportsLoaded.

	exports []string
}

// reachedStatus reports whether info has reached at least the target status,
// along with any error recorded while trying to reach it.
func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) {
	if info.err == nil {
		return info.status >= target, nil
	}
	if info.status == target {
		return true, info.err
	}
	return true, nil
}
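
// A minimal usage sketch, mirroring how CachePackageName below consumes this
// method (illustrative only):
//
//	if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
//		return fmt.Errorf("cannot use %s, scan error: %v", info.dir, err)
//	}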

// dirInfoCache is a concurrency-safe map for storing information about
// directories that may contain packages.
//
// The information in this cache is built incrementally. Entries are initialized in scan.
// No new keys should be added in any other functions, as all directories containing
// packages are identified in scan.
//
// Other functions, including loadExports and findPackage, may update entries in this cache
// as they discover new things about the directory.
//
// The information in the cache is not expected to change for the cache's
// lifetime, so there is no protection against competing writes. Users should
// take care not to hold the cache across changes to the underlying files.
//
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
type dirInfoCache struct {
	mu sync.Mutex
	// dirs stores information about packages in directories, keyed by absolute path.
	dirs map[string]*directoryPackageInfo
	// listeners is notified when a directory is stored for the first time;
	// its keys are the cookies handed out by ScanAndListen.
	listeners map[*int]cacheListener
}

// A cacheListener receives a copy of each directoryPackageInfo reported by
// ScanAndListen, whether pre-existing or newly stored.
type cacheListener func(directoryPackageInfo)
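
// This file defines no constructor for dirInfoCache, so both maps must be
// initialized by the owner before Store or ScanAndListen is used (writing to
// a nil map panics). A hypothetical construction sketch:
//
//	cache := &dirInfoCache{
//		dirs:      map[string]*directoryPackageInfo{},
//		listeners: map[*int]cacheListener{},
//	}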

// ScanAndListen calls listener on all the items in the cache, and on anything
// newly added. The returned stop function waits for all in-flight callbacks to
// finish and blocks new ones.
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
	ctx, cancel := context.WithCancel(ctx)

	// Flushing out all the callbacks is tricky without knowing how many there
	// are going to be. Setting an arbitrary limit makes it much easier.
	const maxInFlight = 10
	sema := make(chan struct{}, maxInFlight)
	for i := 0; i < maxInFlight; i++ {
		sema <- struct{}{}
	}

	cookie := new(int) // A unique ID we can use for the listener.

	// We can't hold mu while calling the listener.
	d.mu.Lock()
	var keys []string
	for key := range d.dirs {
		keys = append(keys, key)
	}
	d.listeners[cookie] = func(info directoryPackageInfo) {
		select {
		case <-ctx.Done():
			return
		case <-sema:
		}
		listener(info)
		sema <- struct{}{}
	}
	d.mu.Unlock()

	stop := func() {
		cancel()
		d.mu.Lock()
		delete(d.listeners, cookie)
		d.mu.Unlock()
		for i := 0; i < maxInFlight; i++ {
			<-sema
		}
	}

	// Process the pre-existing keys.
	for _, k := range keys {
		select {
		case <-ctx.Done():
			return stop
		default:
		}
		if v, ok := d.Load(k); ok {
			listener(v)
		}
	}

	return stop
}
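
// A minimal usage sketch (illustrative only). The listener may be invoked
// from this goroutine for pre-existing entries and from whichever goroutine
// calls Store for new ones, so it must be safe for concurrent use:
//
//	stop := cache.ScanAndListen(ctx, func(info directoryPackageInfo) {
//		// React to each cached or newly stored directory.
//	})
//	// ... work that triggers cache.Store calls ...
//	stop() // waits for in-flight callbacks and blocks new ones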

// Store stores the package info for dir. If dir was not already present in
// the cache, registered listeners are notified with the new entry.
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
	d.mu.Lock()
	_, old := d.dirs[dir]
	d.dirs[dir] = &info
	var listeners []cacheListener
	for _, l := range d.listeners {
		listeners = append(listeners, l)
	}
	d.mu.Unlock()

	if !old {
		for _, l := range listeners {
			l(info)
		}
	}
}
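
// Listeners fire only the first time a directory is seen; later Stores for
// the same key replace the entry silently. Illustrative only (the path is
// hypothetical):
//
//	cache.Store("/abs/dir", info) // first time: listeners are notified
//	info.status = nameLoaded
//	cache.Store("/abs/dir", info) // update: entry replaced, listeners stay quiet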

// Load returns a copy of the directoryPackageInfo for absolute directory dir.
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
	d.mu.Lock()
	defer d.mu.Unlock()
	info, ok := d.dirs[dir]
	if !ok {
		return directoryPackageInfo{}, false
	}
	return *info, true
}

// Keys returns the keys currently present in d.
func (d *dirInfoCache) Keys() (keys []string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	for key := range d.dirs {
		keys = append(keys, key)
	}
	return keys
}

// CachePackageName loads the package name for info's directory if it is not
// already known, records the result in the cache, and returns it.
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
	if loaded, err := info.reachedStatus(nameLoaded); loaded {
		return info.packageName, err
	}
	if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
		return "", fmt.Errorf("cannot read package name, scan error: %v", err)
	}
	info.packageName, info.err = packageDirToName(info.dir)
	info.status = nameLoaded
	d.Store(info.dir, info)
	return info.packageName, info.err
}

// CacheExports loads the package name and exported identifiers for info's
// directory if they are not already known, records the result in the cache,
// and returns them.
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
	if reached, _ := info.reachedStatus(exportsLoaded); reached {
		return info.packageName, info.exports, info.err
	}
	if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
		return "", nil, err
	}
	info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
	if info.err == context.Canceled || info.err == context.DeadlineExceeded {
		return info.packageName, info.exports, info.err
	}
	// The cache structure wants things to proceed linearly. We can skip a
	// step here, but only if we succeed.
	if info.status == nameLoaded || info.err == nil {
		info.status = exportsLoaded
	} else {
		info.status = nameLoaded
	}
	d.Store(info.dir, info)
	return info.packageName, info.exports, info.err
}
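
// CachePackageName and CacheExports are the lazy accessors for the later
// stages: each loads what is missing, stores the updated entry, and returns
// the result. A usage sketch (illustrative only; ctx, env, cache, and dir are
// assumed to be in scope):
//
//	info, ok := cache.Load(dir)
//	if !ok {
//		return
//	}
//	name, exports, err := cache.CacheExports(ctx, env, info)
//	if err != nil {
//		return
//	}
//	fmt.Println(name, len(exports)) // now also cached for dir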