2019-09-23 18:06:15 -06:00
|
|
|
package cache
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2019-10-15 16:07:52 -06:00
|
|
|
"os"
|
2019-09-27 11:17:59 -06:00
|
|
|
"sync"
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-10-15 11:27:09 -06:00
|
|
|
"golang.org/x/tools/go/analysis"
|
2019-10-15 16:07:52 -06:00
|
|
|
"golang.org/x/tools/internal/lsp/protocol"
|
2019-09-27 11:17:59 -06:00
|
|
|
"golang.org/x/tools/internal/lsp/source"
|
2019-09-23 18:06:15 -06:00
|
|
|
"golang.org/x/tools/internal/span"
|
|
|
|
)
|
|
|
|
|
|
|
|
// snapshot holds the caches derived from a view's state at a point in time:
// file handles, package metadata, type-checked package handles, and analysis
// action handles. All maps are guarded by mu.
type snapshot struct {
	// id monotonically increases with each clone (see clone).
	id   uint64
	view *view

	// mu guards all of the maps in the snapshot.
	mu sync.Mutex

	// ids maps file URIs to package IDs.
	// It may be invalidated on calls to go/packages.
	ids map[span.URI][]packageID

	// metadata maps file IDs to their associated metadata.
	// It may be invalidated on calls to go/packages.
	metadata map[packageID]*metadata

	// importedBy maps package IDs to the list of packages that import them.
	// It is rebuilt lazily (see getImportedBy) and left empty by clone.
	importedBy map[packageID][]packageID

	// files maps file URIs to their corresponding FileHandles.
	// It may be invalidated when a file's content changes.
	files map[span.URI]source.FileHandle

	// packages maps a packageKey to a set of CheckPackageHandles to which that file belongs.
	// It may be invalidated when a file's content changes.
	packages map[packageKey]*checkPackageHandle

	// actions maps an actionKey to its actionHandle.
	actions map[actionKey]*actionHandle
}
|
|
|
|
|
2019-10-15 11:27:09 -06:00
|
|
|
// packageKey identifies a type-checked package: the same package ID may be
// checked under different parse modes, yielding distinct entries.
type packageKey struct {
	mode source.ParseMode
	id   packageID
}
|
|
|
|
|
|
|
|
// actionKey identifies an analysis action: one analyzer applied to one
// type-checked package.
type actionKey struct {
	pkg      packageKey
	analyzer *analysis.Analyzer
}
|
|
|
|
|
2019-10-04 15:18:43 -06:00
|
|
|
// View returns the view from which this snapshot was created.
func (s *snapshot) View() source.View {
	return s.view
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
func (s *snapshot) getImportedBy(id packageID) []packageID {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
// If we haven't rebuilt the import graph since creating the snapshot.
|
|
|
|
if len(s.importedBy) == 0 {
|
|
|
|
s.rebuildImportGraph()
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
return s.importedBy[id]
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
|
2019-10-01 13:21:06 -06:00
|
|
|
func (s *snapshot) addPackage(cph *checkPackageHandle) {
|
2019-09-27 11:17:59 -06:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-10-01 13:21:06 -06:00
|
|
|
// TODO: We should make sure not to compute duplicate CheckPackageHandles,
|
|
|
|
// and instead panic here. This will be hard to do because we may encounter
|
|
|
|
// the same package multiple times in the dependency tree.
|
|
|
|
if _, ok := s.packages[cph.packageKey()]; ok {
|
|
|
|
return
|
2019-09-27 11:17:59 -06:00
|
|
|
}
|
2019-10-01 13:21:06 -06:00
|
|
|
s.packages[cph.packageKey()] = cph
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
|
2019-10-01 13:21:06 -06:00
|
|
|
func (s *snapshot) getPackages(uri span.URI, m source.ParseMode) (cphs []source.CheckPackageHandle) {
|
2019-09-27 11:17:59 -06:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-10-01 13:21:06 -06:00
|
|
|
if ids, ok := s.ids[uri]; ok {
|
|
|
|
for _, id := range ids {
|
|
|
|
key := packageKey{
|
|
|
|
id: id,
|
|
|
|
mode: m,
|
|
|
|
}
|
|
|
|
cph, ok := s.packages[key]
|
|
|
|
if ok {
|
|
|
|
cphs = append(cphs, cph)
|
|
|
|
}
|
|
|
|
}
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
2019-09-27 11:17:59 -06:00
|
|
|
return cphs
|
|
|
|
}
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-11-01 15:59:28 -06:00
|
|
|
func (s *snapshot) KnownImportPaths() map[string]source.Package {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
results := map[string]source.Package{}
|
|
|
|
for _, cph := range s.packages {
|
|
|
|
cachedPkg, err := cph.cached()
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for importPath, newPkg := range cachedPkg.imports {
|
|
|
|
if oldPkg, ok := results[string(importPath)]; ok {
|
|
|
|
// Using the same trick as NarrowestPackageHandle, prefer non-variants.
|
|
|
|
if len(newPkg.files) < len(oldPkg.(*pkg).files) {
|
|
|
|
results[string(importPath)] = newPkg
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
results[string(importPath)] = newPkg
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return results
|
|
|
|
}
|
|
|
|
|
2019-10-01 13:21:06 -06:00
|
|
|
func (s *snapshot) getPackage(id packageID, m source.ParseMode) *checkPackageHandle {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
key := packageKey{
|
|
|
|
id: id,
|
|
|
|
mode: m,
|
|
|
|
}
|
|
|
|
return s.packages[key]
|
|
|
|
}
|
|
|
|
|
2019-10-24 13:44:41 -06:00
|
|
|
func (s *snapshot) getActionHandles(id packageID, m source.ParseMode) []*actionHandle {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
var acts []*actionHandle
|
|
|
|
for k, v := range s.actions {
|
|
|
|
if k.pkg.id == id && k.pkg.mode == m {
|
|
|
|
acts = append(acts, v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return acts
|
|
|
|
}
|
|
|
|
|
2019-10-15 11:27:09 -06:00
|
|
|
func (s *snapshot) getAction(id packageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle {
|
2019-10-14 14:13:06 -06:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
key := actionKey{
|
|
|
|
pkg: packageKey{
|
|
|
|
id: id,
|
|
|
|
mode: m,
|
|
|
|
},
|
2019-10-15 11:27:09 -06:00
|
|
|
analyzer: a,
|
2019-10-14 14:13:06 -06:00
|
|
|
}
|
|
|
|
return s.actions[key]
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *snapshot) addAction(ah *actionHandle) {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
key := actionKey{
|
2019-10-15 11:27:09 -06:00
|
|
|
analyzer: ah.analyzer,
|
2019-10-14 14:13:06 -06:00
|
|
|
pkg: packageKey{
|
|
|
|
id: ah.pkg.id,
|
|
|
|
mode: ah.pkg.mode,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
if _, ok := s.actions[key]; ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
s.actions[key] = ah
|
|
|
|
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
func (s *snapshot) getMetadataForURI(uri span.URI) (metadata []*metadata) {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
for _, id := range s.ids[uri] {
|
|
|
|
if m, ok := s.metadata[id]; ok {
|
|
|
|
metadata = append(metadata, m)
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
}
|
2019-09-27 11:17:59 -06:00
|
|
|
return metadata
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
func (s *snapshot) setMetadata(m *metadata) {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-10-01 13:21:06 -06:00
|
|
|
// TODO: We should make sure not to set duplicate metadata,
|
|
|
|
// and instead panic here. This can be done by making sure not to
|
|
|
|
// reset metadata information for packages we've already seen.
|
|
|
|
if _, ok := s.metadata[m.id]; ok {
|
|
|
|
return
|
|
|
|
}
|
2019-09-27 11:17:59 -06:00
|
|
|
s.metadata[m.id] = m
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
func (s *snapshot) getMetadata(id packageID) *metadata {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
return s.metadata[id]
|
|
|
|
}
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
func (s *snapshot) addID(uri span.URI, id packageID) {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-10-01 13:21:06 -06:00
|
|
|
for _, existingID := range s.ids[uri] {
|
|
|
|
if existingID == id {
|
|
|
|
// TODO: We should make sure not to set duplicate IDs,
|
|
|
|
// and instead panic here. This can be done by making sure not to
|
|
|
|
// reset metadata information for packages we've already seen.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
2019-09-27 11:17:59 -06:00
|
|
|
s.ids[uri] = append(s.ids[uri], id)
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
// getIDs returns the IDs of every package known to contain uri.
// It returns nil if the URI has no recorded packages.
func (s *snapshot) getIDs(uri span.URI) []packageID {
	s.mu.Lock()
	defer s.mu.Unlock()

	return s.ids[uri]
}
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
// getFile returns the FileHandle cached for uri, or nil if none is cached.
// Unlike Handle, it never fetches a new handle from the session.
func (s *snapshot) getFile(uri span.URI) source.FileHandle {
	s.mu.Lock()
	defer s.mu.Unlock()

	return s.files[uri]
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
func (s *snapshot) Handle(ctx context.Context, f source.File) source.FileHandle {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
if _, ok := s.files[f.URI()]; !ok {
|
|
|
|
s.files[f.URI()] = s.view.session.GetFile(f.URI(), f.Kind())
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
2019-09-27 11:17:59 -06:00
|
|
|
return s.files[f.URI()]
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
|
2019-10-01 13:21:06 -06:00
|
|
|
// clone returns a copy of this snapshot with its ID incremented, dropping
// stale state: the FileHandle for withoutURI (if non-nil), type information
// for packages containing any URI in withoutTypes or withoutMetadata, and
// metadata (plus URI->ID associations) for packages containing any URI in
// withoutMetadata. The importedBy graph is left empty and rebuilt lazily.
func (s *snapshot) clone(ctx context.Context, withoutURI *span.URI, withoutTypes, withoutMetadata map[span.URI]struct{}) *snapshot {
	s.mu.Lock()
	defer s.mu.Unlock()

	result := &snapshot{
		id:         s.id + 1,
		view:       s.view,
		ids:        make(map[span.URI][]packageID),
		importedBy: make(map[packageID][]packageID),
		metadata:   make(map[packageID]*metadata),
		packages:   make(map[packageKey]*checkPackageHandle),
		actions:    make(map[actionKey]*actionHandle),
		files:      make(map[span.URI]source.FileHandle),
	}
	// Copy all of the FileHandles except for the one that was invalidated.
	for k, v := range s.files {
		if withoutURI != nil && k == *withoutURI {
			continue
		}
		result.files[k] = v
	}
	// Collect the IDs for the packages associated with the excluded URIs.
	withoutMetadataIDs := make(map[packageID]struct{})
	withoutTypesIDs := make(map[packageID]struct{})
	for k, ids := range s.ids {
		// Map URIs to IDs for exclusion.
		if withoutTypes != nil {
			if _, ok := withoutTypes[k]; ok {
				for _, id := range ids {
					withoutTypesIDs[id] = struct{}{}
				}
			}
		}
		if withoutMetadata != nil {
			if _, ok := withoutMetadata[k]; ok {
				for _, id := range ids {
					withoutMetadataIDs[id] = struct{}{}
				}
				// Metadata is invalidated, so the URI->ID association is
				// also dropped (not copied into result.ids below).
				continue
			}
		}
		result.ids[k] = ids
	}
	// Copy the package type information.
	for k, v := range s.packages {
		if _, ok := withoutTypesIDs[k.id]; ok {
			continue
		}
		if _, ok := withoutMetadataIDs[k.id]; ok {
			continue
		}
		result.packages[k] = v
	}
	// Copy the package analysis information.
	for k, v := range s.actions {
		if _, ok := withoutTypesIDs[k.pkg.id]; ok {
			continue
		}
		if _, ok := withoutMetadataIDs[k.pkg.id]; ok {
			continue
		}
		result.actions[k] = v
	}
	// Copy the package metadata.
	for k, v := range s.metadata {
		if _, ok := withoutMetadataIDs[k]; ok {
			continue
		}
		result.metadata[k] = v
	}
	// Don't bother copying the importedBy graph,
	// as it changes each time we update metadata.
	return result
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
// invalidateContent invalidates the content of a Go file,
// including any position and type information that depends on it.
// It replaces the view's snapshot with a clone that excludes the stale
// state, and reports whether any packages were actually invalidated.
func (v *view) invalidateContent(ctx context.Context, f source.File, kind source.FileKind, changeType protocol.FileChangeType) bool {
	var (
		withoutTypes    = make(map[span.URI]struct{})
		withoutMetadata = make(map[span.URI]struct{})
		ids             = make(map[packageID]struct{})
	)

	// This should be the only time we hold the view's snapshot lock for any period of time.
	v.snapshotMu.Lock()
	defer v.snapshotMu.Unlock()

	// Start from the packages already known to contain this file.
	for _, id := range v.snapshot.getIDs(f.URI()) {
		ids[id] = struct{}{}
	}

	switch changeType {
	case protocol.Created:
		// If this is a file we don't yet know about,
		// then we do not yet know what packages it should belong to.
		// Make a rough estimate of what metadata to invalidate by finding the package IDs
		// of all of the files in the same directory as this one.
		// TODO(rstambler): Speed this up by mapping directories to filenames.
		if dirStat, err := os.Stat(dir(f.URI().Filename())); err == nil {
			for uri := range v.snapshot.files {
				if fdirStat, err := os.Stat(dir(uri.Filename())); err == nil {
					// os.SameFile compares the stat results, so this matches
					// the directory even through symlinks or case-folding.
					if os.SameFile(dirStat, fdirStat) {
						for _, id := range v.snapshot.ids[uri] {
							ids[id] = struct{}{}
						}
					}
				}
			}
		}
	}

	// No packages are affected by this change; keep the current snapshot.
	if len(ids) == 0 {
		return false
	}

	// Remove the package and all of its reverse dependencies from the cache.
	for id := range ids {
		v.snapshot.reverseDependencies(id, withoutTypes, map[packageID]struct{}{})
	}

	// Get the original FileHandle for the URI, if it exists.
	originalFH := v.snapshot.getFile(f.URI())

	// Make sure to clear out the content if there has been a deletion.
	if changeType == protocol.Deleted {
		v.session.clearOverlay(f.URI())
	}

	// Get the current FileHandle for the URI.
	currentFH := v.session.GetFile(f.URI(), kind)

	// Check if the file's package name or imports have changed,
	// and if so, invalidate metadata.
	if v.session.cache.shouldLoad(ctx, v.snapshot, originalFH, currentFH) {
		withoutMetadata = withoutTypes

		// TODO: If a package's name has changed,
		// we should invalidate the metadata for the new package name (if it exists).
	}
	uri := f.URI()
	v.snapshot = v.snapshot.clone(ctx, &uri, withoutTypes, withoutMetadata)
	return true
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
// reverseDependencies populates the uris map with file URIs belonging to the
|
|
|
|
// provided package and its transitive reverse dependencies.
|
|
|
|
func (s *snapshot) reverseDependencies(id packageID, uris map[span.URI]struct{}, seen map[packageID]struct{}) {
|
2019-09-23 18:06:15 -06:00
|
|
|
if _, ok := seen[id]; ok {
|
|
|
|
return
|
|
|
|
}
|
2019-09-27 11:17:59 -06:00
|
|
|
m := s.getMetadata(id)
|
|
|
|
if m == nil {
|
2019-09-23 18:06:15 -06:00
|
|
|
return
|
|
|
|
}
|
2019-09-27 11:17:59 -06:00
|
|
|
seen[id] = struct{}{}
|
|
|
|
importedBy := s.getImportedBy(id)
|
|
|
|
for _, parentID := range importedBy {
|
|
|
|
s.reverseDependencies(parentID, uris, seen)
|
|
|
|
}
|
|
|
|
for _, uri := range m.files {
|
|
|
|
uris[uri] = struct{}{}
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
2019-09-27 11:17:59 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
// clearAndRebuildImportGraph discards the reverse-import graph and
// recomputes it from the snapshot's current metadata.
func (s *snapshot) clearAndRebuildImportGraph() {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Completely invalidate the original map.
	s.importedBy = make(map[packageID][]packageID)
	s.rebuildImportGraph()
}
|
|
|
|
|
|
|
|
func (s *snapshot) rebuildImportGraph() {
|
|
|
|
for id, m := range s.metadata {
|
|
|
|
for _, importID := range m.deps {
|
|
|
|
s.importedBy[importID] = append(s.importedBy[importID], id)
|
|
|
|
}
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
}
|