Mirror of https://github.com/golang/go (synced 2024-11-25 06:47:56 -07:00)
dashboard: lots of caching to avoid datastore queries
reorganize benchmark computation so that it is incremental.
if it times out, it doesn't lose the pieces it already computed,
so that next time it has a fighting chance to finish.

R=agl1, agl
CC=golang-dev
https://golang.org/cl/216046
commit 666abfb64c
parent 32c3c95339
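The change applies one App Engine pattern throughout: keep expensive datastore-derived values in memcache, build the benchmark page piece by piece, cache each finished piece as it completes, and only cache the fully rendered page when every piece finished before the request deadline. A minimal sketch of that pattern, using hypothetical names rather than the dashboard's actual handlers:

from google.appengine.api import memcache
from google.appengine.runtime import DeadlineExceededError

def render_page(items, compute_item):
    # Returns (page, full). 'full' is False if the deadline hit partway
    # through; the pieces cached so far make the next attempt cheaper.
    parts = []
    full = True
    try:
        for item in items:
            key = 'piece-%s' % item
            part = memcache.get(key)
            if part is None:
                part = compute_item(item)      # expensive datastore work
                memcache.set(key, part, 3600)  # cache each finished piece
            parts.append(part)
    except DeadlineExceededError:
        full = False                           # forge ahead with what we have
    page = ''.join(parts)                      # may be a partial page
    if full:
        memcache.set('page', page, 3600)       # whole page only when complete
    return page, full

Because each piece is cached as soon as it is finished, a request that hits DeadlineExceededError still leaves the cache warmer than it found it, which is what gives the next request its "fighting chance to finish."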
@@ -6,6 +6,7 @@
 # by AppEngine.
 
 from google.appengine.api import memcache
+from google.appengine.runtime import DeadlineExceededError
 from google.appengine.ext import db
 from google.appengine.ext import webapp
 from google.appengine.ext.webapp import template
@@ -76,6 +77,15 @@ def builderInfo(b):
         note = f[2]
     return {'name': b, 'goos': goos, 'goarch': goarch, 'note': note}
 
+def builderset():
+    q = Commit.all()
+    q.order('-__key__')
+    results = q.fetch(N)
+    builders = set()
+    for c in results:
+        builders.update(set(parseBuild(build)['builder'] for build in c.builds))
+    return builders
+
 class MainPage(webapp.RequestHandler):
     def get(self):
         self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
@@ -108,30 +118,32 @@ class MainPage(webapp.RequestHandler):
 class GetHighwater(webapp.RequestHandler):
     def get(self):
         builder = self.request.get('builder')
 
-        hw = Highwater.get_by_key_name('hw-%s' % builder)
-        if hw is None:
-            # If no highwater has been recorded for this builder,
-            # we go back N+1 commits and return that.
-            q = Commit.all()
-            q.order('-__key__')
-            c = q.fetch(N+1)[-1]
-            self.response.set_status(200)
-            self.response.out.write(c.node)
-            return
-
-        # if the proposed hw is too old, bump it forward
-        node = hw.commit
-        found = False
-        q = Commit.all()
-        q.order('-__key__')
-        recent = q.fetch(N+1)
-        for c in recent:
-            if c.node == node:
-                found = True
-                break
-        if not found:
-            node = recent[-1].node
+        key = 'hw-%s' % builder
+        node = memcache.get(key)
+        if node is None:
+            hw = Highwater.get_by_key_name('hw-%s' % builder)
+            if hw is None:
+                # If no highwater has been recorded for this builder,
+                # we go back N+1 commits and return that.
+                q = Commit.all()
+                q.order('-__key__')
+                c = q.fetch(N+1)[-1]
+                node = c.node
+            else:
+                # if the proposed hw is too old, bump it forward
+                node = hw.commit
+                found = False
+                q = Commit.all()
+                q.order('-__key__')
+                recent = q.fetch(N+1)
+                for c in recent:
+                    if c.node == node:
+                        found = True
+                        break
+                if not found:
+                    node = recent[-1].node
+            memcache.set(key, node, 3600)
         self.response.set_status(200)
         self.response.out.write(node)
 
@@ -166,7 +178,9 @@ class SetHighwater(webapp.RequestHandler):
         if not found:
             c = recent[-1]
 
-        hw = Highwater(key_name = 'hw-%s' % builder)
+        key = 'hw-%s' % builder
+        memcache.delete(key)
+        hw = Highwater(key_name = key)
         hw.commit = c.node
         hw.put()
 
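The two hunks above form a read-through cache with explicit invalidation: GetHighwater consults memcache first and only falls back to the Highwater entity (or the commit history) on a miss, while SetHighwater deletes the cached entry so the next read recomputes it. Reduced to its essentials, the pairing looks like this (hypothetical helper names, not the dashboard's code):

from google.appengine.api import memcache

CACHE_SECONDS = 3600  # same one-hour TTL the dashboard uses

def cached_highwater(builder, load_from_datastore):
    # Read path: try memcache first, recompute from the datastore on a miss.
    key = 'hw-%s' % builder
    node = memcache.get(key)
    if node is None:
        node = load_from_datastore(builder)
        memcache.set(key, node, CACHE_SECONDS)
    return node

def update_highwater(builder, store_in_datastore, node):
    # Write path: update the datastore, then invalidate the cache so the
    # next cached_highwater() call sees the new value.
    store_in_datastore(builder, node)
    memcache.delete('hw-%s' % builder)

The TTL bounds staleness even if an invalidation is ever missed.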
@@ -270,6 +284,8 @@ class Build(webapp.RequestHandler):
         hw = Highwater(key_name = 'hw-%s' % builder)
         hw.commit = node
         hw.put()
+        memcache.delete('hw')
+        memcache.delete('bench')
 
         self.response.set_status(200)
 
@@ -295,95 +311,91 @@ class Benchmarks(webapp.RequestHandler):
             return self.json()
         self.response.set_status(200)
         self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
-        q = Commit.all()
-        q.order('-__key__')
-        n = q.fetch(1)[0]
-        key = "bench(%d)" % n.num
-        page = None # memcache.get(key)
+        page = memcache.get('bench')
         if not page:
-            page = self.compute()
-            memcache.set(key, page, 3600)
+            num = memcache.get('hw')
+            if num is None:
+                q = Commit.all()
+                q.order('-__key__')
+                n = q.fetch(1)[0]
+                memcache.set('hw', num)
+            page, full = self.compute(n.num)
+            if full:
+                memcache.set('bench', page, 3600)
         self.response.out.write(page)
 
-    def compute(self):
+    def compute(self, num):
         q = Benchmark.all()
         q.filter('__key__ >', Benchmark.get_or_insert('v002.').key())
-        bs = q.fetch(10000)
+        benchmarks = q.fetch(10000)
 
-        # Collect table giving all the data we need.
-        builders = {}
-        data = {}
-        for b in bs:
-            # TODO(rsc): Will want to limit benchmarks to a certain
-            # number of commits eventually, but there aren't enough
-            # commits yet to worry.
-            q = BenchmarkResult.all()
-            q.ancestor(b)
-            q.order('-__key__')
-            results = q.fetch(10000)
-            m = {}
-            revs = {}
-            for r in results:
-                if r.builder not in m:
-                    m[r.builder] = {}
-                m[r.builder][r.num] = r.nsperop
-                revs[r.num] = 0
-                builders[r.builder] = 0
-            data[b.name] = m
-
-        builders = list(builders.keys())
+        # Which builders have sent benchmarks recently?
+        builders = set()
+        q = BenchmarkResult.all()
+        q.ancestor(benchmarks[0])
+        q.order('-__key__')
+        for r in q.fetch(50):
+            builders.add(r.builder)
+        builders = list(builders)
         builders.sort()
 
-        revs = list(revs.keys())
-        revs.sort()
-        first = revs[0]
-        last = revs[-1]
-        if len(revs) > 80: # At most 80 commits back
-            last = revs[-80]
-
-        names = list(data.keys())
-        names.sort()
+        NB = 80
+        last = num
+        first = num+1 - NB
 
         # Build list of rows, one per benchmark
-        benchmarks = []
-        for name in names:
-            # Build list of cells, one per builder.
-            m = data[name]
-            builds = []
-            for builder in builders:
-                # Build cell: a URL for the chart server or an empty string.
-                if builder not in m:
-                    builds.append({"url":""})
-                    continue
-                d = m[builder]
-                max = 0
-                tot = 0
-                ntot = 0
-                for i in range(first, last+1):
-                    if i not in d:
-                        continue
-                    val = d[i]
-                    if max < val:
-                        max = val
-                    tot += val
-                    ntot += 1
-                if max == 0:
-                    builds.append({"url":""})
-                    continue
-                avg = tot / ntot
-                if 2*avg > max:
-                    max = 2*avg
-                # Encoding is 0-61, which is fine enough granularity for our tiny graphs. _ means missing.
-                encoding = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
-                s = ""
-                for i in range(first, last+1):
-                    if i not in d:
-                        s += "_"
-                        continue
-                    val = d[i]
-                    s += encoding[int((len(encoding)-1)*val/max)]
-                builds.append({"url": "http://chart.apis.google.com/chart?cht=ls&chd=s:"+s})
-            benchmarks.append({"name": name, "builds": builds})
+        rows = [{"name": bm.name, "builds": [{"url": ""} for b in builders]} for bm in benchmarks]
+
+        full = True
+        try:
+            for i in range(len(rows)):
+                data = None
+                bm = benchmarks[i]
+                builds = rows[i]["builds"]
+                all = None
+                for j in range(len(builders)):
+                    cell = builds[j]
+                    b = builders[j]
+                    # Build cell: a URL for the chart server or an empty string.
+                    # Cache individual graphs because they're so damn expensive.
+                    key = "bench(%s,%s,%d)" % (bm.name, b, num)
+                    url = memcache.get(key)
+                    if url is not None:
+                        cell["url"] = url
+                        continue
+
+                    # Page in all data for benchmark for all builders,
+                    # on demand. It might be faster to ask for just the
+                    # builder that we need, but q.filter('builder = ', b) is
+                    # broken right now (the index is corrupt).
+                    if all is None:
+                        q = BenchmarkResult.all()
+                        q.ancestor(bm)
+                        q.order('-__key__')
+                        all = q.fetch(1000)
+
+                    data = [-1 for x in range(first, last+1)]
+                    for r in all:
+                        if r.builder == b and first <= r.num and r.num <= last:
+                            data[r.num - first] = r.nsperop
+                    present = [x for x in data if x >= 0]
+                    if len(present) == 0:
+                        memcache.set(key, "", 3600)
+                        continue
+                    avg = sum(present) / len(present)
+                    maxval = max(2*avg, max(present))
+                    # Encoding is 0-61, which is fine enough granularity for our tiny graphs. _ means missing.
+                    encoding = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+                    s = ''.join([x < 0 and "_" or encoding[int((len(encoding)-1)*x/maxval)] for x in data])
+                    url = "http://chart.apis.google.com/chart?cht=ls&chd=s:"+s
+                    memcache.set(key, url, 3600)
+                    cell["url"] = url
+        except DeadlineExceededError:
+            # forge ahead with partial benchmark results
+            # the url caches above should make the next page quicker to compute
+            full = False
+
+        names = [bm.name for bm in benchmarks]
 
         bs = []
         for b in builders:
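The rewritten compute() above caches one chart URL per (benchmark, builder, commit) and rebuilds only the missing ones, and when DeadlineExceededError fires it returns whatever it managed to finish. Each sparkline is the Chart API's 62-character simple encoding of the series, scaled against max(2*avg, max) so a single outlier doesn't flatten the rest of the graph. Pulled out as a standalone sketch (not the dashboard's code verbatim), the encoding step is roughly:

ENCODING = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

def encode_series(data):
    # data holds one ns/op sample per commit; -1 marks a missing sample.
    present = [x for x in data if x >= 0]
    if not present:
        return ""
    avg = sum(present) / len(present)
    maxval = max(2 * avg, max(present))
    return ''.join("_" if x < 0 else ENCODING[int((len(ENCODING) - 1) * x / maxval)]
                   for x in data)

# encode_series([100, 120, -1, 400]) == "OR_7": small samples map near 'A',
# the largest sample maps near the end of the alphabet, and gaps become '_'.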
@@ -395,10 +407,10 @@ class Benchmarks(webapp.RequestHandler):
                 note = f[2]
             bs.append({'goos': goos, 'goarch': goarch, 'note': note})
 
-        values = {"benchmarks": benchmarks, "builders": bs}
+        values = {"benchmarks": rows, "builders": bs}
 
         path = os.path.join(os.path.dirname(__file__), 'benchmarks.html')
-        return template.render(path, values)
+        return template.render(path, values), full
 
     def post(self):
         if not auth(self.request):
@@ -444,8 +456,10 @@ class Benchmarks(webapp.RequestHandler):
             b = Benchmark.get_or_insert('v002.' + benchmark.encode('base64'), name = benchmark, version = 2)
             r = BenchmarkResult(key_name = '%08x/%s' % (n.num, builder), parent = b, num = n.num, iterations = iterations, nsperop = time, builder = builder)
             r.put()
-            key = "bench(%d)" % n.num
+            key = "bench(%s,%s,%d)" % (benchmark, builder, n.num)
             memcache.delete(key)
+
+        memcache.delete('bench')
         self.response.set_status(200)
 
 def node(num):
@@ -481,6 +495,9 @@ class GetBenchmarks(webapp.RequestHandler):
                 minv = r.num
             builders.add(r.builder)
 
+        builders = list(builders)
+        builders.sort()
+
         res = {}
         for b in builders:
             res[b] = [[-1] * ((maxv - minv) + 1), [-1] * ((maxv - minv) + 1)]
@@ -1,14 +1,11 @@
 indexes:
 
-# AUTOGENERATED
-
-# This index.yaml is automatically updated whenever the dev_appserver
-# detects that a new type of query is run. If you want to manage the
-# index.yaml file manually, remove the above marker line (the line
-# saying "# AUTOGENERATED"). If you want to manage some indexes
-# manually, move them above the marker line. The index.yaml file is
-# automatically uploaded to the admin console when you next deploy
-# your application using appcfg.py.
+- kind: BenchmarkResult
+  ancestor: yes
+  properties:
+  - name: builder
+  - name: __key__
+    direction: desc
 
 - kind: BenchmarkResult
   ancestor: yes
@@ -20,3 +17,14 @@ indexes:
   properties:
   - name: __key__
     direction: desc
+
+# AUTOGENERATED
+
+# This index.yaml is automatically updated whenever the dev_appserver
+# detects that a new type of query is run. If you want to manage the
+# index.yaml file manually, remove the above marker line (the line
+# saying "# AUTOGENERATED"). If you want to manage some indexes
+# manually, move them above the marker line. The index.yaml file is
+# automatically uploaded to the admin console when you next deploy
+# your application using appcfg.py.
+