mirror of https://github.com/golang/go
godoc: minor tweaks for app-engine use
- read search index files in a goroutine to avoid start-up failure on app engine because reading the files takes too long
- permit usage of search index files and indexer
- minor cosmetic cleanups

R=dsymonds
CC=golang-dev
https://golang.org/cl/4952050
parent 2517143957
commit cd6f319a76
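The heart of the change, visible in the hunks below, is moving the slow index load off the start-up path and into the indexer goroutine. The following is a minimal, self-contained sketch of that pattern, not the godoc code itself: searchIndex is a plain atomic.Value here, and loadIndexFiles and rebuildIndex are hypothetical stand-ins for reading a saved index and re-indexing the file tree.

package main

import (
    "log"
    "sync/atomic"
    "time"
)

// index is a stand-in for godoc's search index type.
type index struct{}

var searchIndex atomic.Value // holds the current *index; nil until the first load

// loadIndexFiles and rebuildIndex are hypothetical stand-ins for reading a
// pre-built index from disk and re-indexing the file tree, respectively.
func loadIndexFiles(glob string) (*index, error) { return &index{}, nil }
func rebuildIndex() *index                       { return &index{} }

// indexer runs in its own goroutine: it first tries to restore a pre-built
// index from disk (which may take a while), then periodically rebuilds the
// index. The real indexer rebuilds only when the file tree changed
// (indexUpToDate); this sketch simply rebuilds on every tick.
func indexer(indexFiles string) {
    if indexFiles != "" {
        if x, err := loadIndexFiles(indexFiles); err != nil {
            log.Printf("error reading index: %s", err)
        } else {
            searchIndex.Store(x)
        }
    }
    for {
        searchIndex.Store(rebuildIndex())
        time.Sleep(60 * time.Second)
    }
}

func main() {
    // Start-up returns immediately; the index becomes available later.
    go indexer("index.split.*")
    log.Println("initialization complete")
    select {} // stand-in for the HTTP server loop
}

The point is that initialization returns quickly; anything that may take long on app engine, such as reading multi-megabyte index files, happens after start-up, in the background.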
@@ -17,9 +17,11 @@ const (
     // in the .zip file.
     zipGoroot = "/home/user/go"

-    // indexFilenames is a glob pattern specifying
-    // files containing the search index served by
-    // godoc. The files are concatenated in sorted
+    // If indexFilenames != "", the search index is
+    // initialized with the index stored in these
+    // files (otherwise it will be built at run-time,
+    // eventually). indexFilenames is a glob pattern;
+    // the specified files are concatenated in sorted
     // order (by filename).
     // app-engine limit: file sizes must be <= 10MB;
     // use "split -b8m indexfile index.split." to get
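To respect the 10MB-per-file limit mentioned in the comment above, a large index has to be cut into pieces before deployment. The following self-contained sketch does the cut in Go and mirrors what the suggested "split -b8m" command does; the file names and the numeric suffix are illustrative only.

package main

import (
    "fmt"
    "io"
    "os"
)

// splitFile writes name's contents into consecutive pieces of at most
// chunk bytes each, named <name>.split.000, <name>.split.001, ...
func splitFile(name string, chunk int64) error {
    in, err := os.Open(name)
    if err != nil {
        return err
    }
    defer in.Close()
    for i := 0; ; i++ {
        out, err := os.Create(fmt.Sprintf("%s.split.%03d", name, i))
        if err != nil {
            return err
        }
        n, err := io.CopyN(out, in, chunk)
        out.Close()
        if err == io.EOF {
            if n == 0 {
                os.Remove(out.Name()) // input size was an exact multiple of chunk
            }
            return nil
        }
        if err != nil {
            return err
        }
    }
}

func main() {
    // 8MB pieces keep each file comfortably under the 10MB app-engine limit.
    if err := splitFile("indexfile", 8<<20); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}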
@@ -88,9 +88,7 @@ func init() {

     // initialize search index
     if *indexEnabled {
-        if err := initIndex(); err != nil {
-            log.Fatalf("error initializing index: %s", err)
-        }
+        go indexer()
     }

     log.Println("godoc initialization complete")
@@ -1065,12 +1065,8 @@ func lookup(query string) (result SearchResult) {
     if *indexEnabled {
         if _, ts := fsModified.get(); timestamp < ts {
             // The index is older than the latest file system change under godoc's observation.
-            if *indexFiles != "" {
-                result.Alert = "Index not automatically updated: result may be inaccurate"
-            } else {
-                result.Alert = "Indexing in progress: result may be inaccurate"
-            }
+            result.Alert = "Indexing in progress: result may be inaccurate"
         }
     } else {
         result.Alert = "Search index disabled: no results available"
     }
@@ -1145,49 +1141,8 @@ func fsDirnames() <-chan string {
     return c
 }

-func updateIndex() {
-    if *verbose {
-        log.Printf("updating index...")
-    }
-    start := time.Nanoseconds()
-    index := NewIndex(fsDirnames(), *maxResults > 0, *indexThrottle)
-    stop := time.Nanoseconds()
-    searchIndex.set(index)
-    if *verbose {
-        secs := float64((stop-start)/1e6) / 1e3
-        stats := index.Stats()
-        log.Printf("index updated (%gs, %d bytes of source, %d files, %d lines, %d unique words, %d spots)",
-            secs, stats.Bytes, stats.Files, stats.Lines, stats.Words, stats.Spots)
-    }
-    log.Printf("before GC: bytes = %d footprint = %d", runtime.MemStats.HeapAlloc, runtime.MemStats.Sys)
-    runtime.GC()
-    log.Printf("after GC: bytes = %d footprint = %d", runtime.MemStats.HeapAlloc, runtime.MemStats.Sys)
-}
-
-func indexer() {
-    for {
-        if !indexUpToDate() {
-            // index possibly out of date - make a new one
-            updateIndex()
-        }
-        var delay int64 = 60 * 1e9 // by default, try every 60s
-        if *testDir != "" {
-            // in test mode, try once a second for fast startup
-            delay = 1 * 1e9
-        }
-        time.Sleep(delay)
-    }
-}
-
-func initIndex() os.Error {
-    if *indexFiles == "" {
-        // run periodic indexer
-        go indexer()
-        return nil
-    }
-
-    // get search index from files
-    matches, err := filepath.Glob(*indexFiles)
+func readIndex(filenames string) os.Error {
+    matches, err := filepath.Glob(filenames)
     if err != nil {
         return err
     }
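Only the head of the new readIndex is visible in this hunk; its tail (searchIndex.set(x); return nil) appears as context at the top of the next one, and the unchanged middle, apparently inherited from the old initIndex, is not displayed at all. The sketch below shows the general shape of such a reader in self-contained form: glob the index pieces, sort them so they are concatenated in filename order, and decode the combined stream. The index type and its gob encoding are stand-ins, not godoc's actual Index format.

package main

import (
    "encoding/gob"
    "io"
    "log"
    "os"
    "path/filepath"
    "sort"
)

// index is a stand-in for godoc's Index type; a real index is far larger.
type index struct{ Words map[string][]int }

// readSplitIndex globs the index pieces, sorts them so they come back in the
// order they were written, and decodes their concatenation as a single stream.
func readSplitIndex(pattern string) (*index, error) {
    matches, err := filepath.Glob(pattern)
    if err != nil {
        return nil, err
    }
    sort.Strings(matches) // concatenate in sorted order (by filename)
    readers := make([]io.Reader, 0, len(matches))
    for _, name := range matches {
        f, err := os.Open(name)
        if err != nil {
            return nil, err
        }
        defer f.Close()
        readers = append(readers, f)
    }
    var x index
    if err := gob.NewDecoder(io.MultiReader(readers...)).Decode(&x); err != nil {
        return nil, err
    }
    return &x, nil
}

func main() {
    x, err := readSplitIndex("index.split.*")
    if err != nil {
        log.Fatalf("error reading index: %s", err)
    }
    log.Printf("index loaded: %d words", len(x.Words))
}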
@@ -1208,3 +1163,45 @@ func initIndex() os.Error {
     searchIndex.set(x)
     return nil
 }
+
+func updateIndex() {
+    if *verbose {
+        log.Printf("updating index...")
+    }
+    start := time.Nanoseconds()
+    index := NewIndex(fsDirnames(), *maxResults > 0, *indexThrottle)
+    stop := time.Nanoseconds()
+    searchIndex.set(index)
+    if *verbose {
+        secs := float64((stop-start)/1e6) / 1e3
+        stats := index.Stats()
+        log.Printf("index updated (%gs, %d bytes of source, %d files, %d lines, %d unique words, %d spots)",
+            secs, stats.Bytes, stats.Files, stats.Lines, stats.Words, stats.Spots)
+    }
+    log.Printf("before GC: bytes = %d footprint = %d", runtime.MemStats.HeapAlloc, runtime.MemStats.Sys)
+    runtime.GC()
+    log.Printf("after GC: bytes = %d footprint = %d", runtime.MemStats.HeapAlloc, runtime.MemStats.Sys)
+}
+
+func indexer() {
+    // initialize the index from disk if possible
+    if *indexFiles != "" {
+        if err := readIndex(*indexFiles); err != nil {
+            log.Printf("error reading index: %s", err)
+        }
+    }
+
+    // repeatedly update the index when it goes out of date
+    for {
+        if !indexUpToDate() {
+            // index possibly out of date - make a new one
+            updateIndex()
+        }
+        var delay int64 = 60 * 1e9 // by default, try every 60s
+        if *testDir != "" {
+            // in test mode, try once a second for fast startup
+            delay = 1 * 1e9
+        }
+        time.Sleep(delay)
+    }
+}
@@ -248,6 +248,7 @@ func main() {
         if err != nil {
             log.Fatalf("%s: %s\n", *zipfile, err)
         }
+        defer rc.Close() // be nice (e.g., -writeIndex mode)
         *goroot = path.Join("/", *goroot) // fsHttp paths are relative to '/'
         fs = NewZipFS(rc)
         fsHttp = NewHttpZipFS(rc, *goroot)
@@ -262,8 +263,9 @@ func main() {
     }

     if *writeIndex {
+        // Write search index and exit.
         if *indexFiles == "" {
-            log.Fatal("no index files specified")
+            log.Fatal("no index file specified")
         }

         log.Println("initialize file systems")
@@ -342,9 +344,7 @@ func main() {

     // Initialize search index.
     if *indexEnabled {
-        if err := initIndex(); err != nil {
-            log.Fatalf("error initializing index: %s", err)
-        }
+        go indexer()
     }

     // Start http server.