commit 224c947ce5

This separates the concerns of tag collections that have to be iterated and tag collections that need lookup by key. It also makes events carry a plain slice of tags. We pass a TagMap down through the exporters and allow it to be extended on the way. We no longer need the event.Query method (or the event type). We now exclusively use Key as the identity, and no longer have a common core implementation but just implement it directly in each type. This removes some confusion that was causing the same key to end up with a different identity depending on the path it took.

Change-Id: I61e47adcb397f4ca83dd90342b021dd8e9571ed3
Reviewed-on: https://go-review.googlesource.com/c/tools/+/224278
Run-TryBot: Ian Cottrell <iancottrell@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Emmanuel Odeke <emm.odeke@gmail.com>

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

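// Package prometheus implements a telemetry event exporter that accumulates
// metric data and serves it over HTTP in the Prometheus text exposition format.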
package prometheus

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"sort"
	"sync"

	"golang.org/x/tools/internal/telemetry/event"
	"golang.org/x/tools/internal/telemetry/export/metric"
)

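// New returns an Exporter with no metric data recorded yet.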
func New() *Exporter {
	return &Exporter{}
}

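// An Exporter holds the most recent data seen for each metric, kept in
// name-sorted order, and renders it on demand for a Prometheus scrape.
// mu guards metrics, since events and HTTP scrapes may arrive concurrently.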
type Exporter struct {
	mu      sync.Mutex
	metrics []metric.Data
}

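// ProcessEvent records the metric data carried by recording events,
// inserting or updating each metric while keeping e.metrics sorted by
// handle; all other events pass through unchanged.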
func (e *Exporter) ProcessEvent(ctx context.Context, ev event.Event, tagMap event.TagMap) context.Context {
	if !ev.IsRecord() {
		return ctx
	}
	e.mu.Lock()
	defer e.mu.Unlock()
	metrics := metric.Entries.Get(tagMap).([]metric.Data)
	for _, data := range metrics {
		name := data.Handle()
		// We keep the metrics in name sorted order so the page is stable and easy
		// to read. We do this with an insertion sort rather than sorting the list
		// each time
		index := sort.Search(len(e.metrics), func(i int) bool {
			return e.metrics[i].Handle() >= name
		})
		if index >= len(e.metrics) || e.metrics[index].Handle() != name {
			// we have a new metric, so we need to make a space for it
			old := e.metrics
			e.metrics = make([]metric.Data, len(old)+1)
			copy(e.metrics, old[:index])
			copy(e.metrics[index+1:], old[index:])
		}
		e.metrics[index] = data
	}
	return ctx
}

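// header writes the # HELP and # TYPE comment lines that introduce a metric
// in the Prometheus text exposition format.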
func (e *Exporter) header(w http.ResponseWriter, name, description string, isGauge, isHistogram bool) {
	kind := "counter"
	if isGauge {
		kind = "gauge"
	}
	if isHistogram {
		kind = "histogram"
	}
	fmt.Fprintf(w, "# HELP %s %s\n", name, description)
	fmt.Fprintf(w, "# TYPE %s %s\n", name, kind)
}

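// row writes a single sample line: the metric name, an optional {...} label
// block built from the printed group tags plus any extra label, and the value.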
func (e *Exporter) row(w http.ResponseWriter, name string, group []event.Tag, extra string, value interface{}) {
	fmt.Fprint(w, name)
	buf := &bytes.Buffer{}
	fmt.Fprint(buf, group)
	if extra != "" {
		if buf.Len() > 0 {
			fmt.Fprint(buf, ",")
		}
		fmt.Fprint(buf, extra)
	}
	if buf.Len() > 0 {
		fmt.Fprint(w, "{")
		buf.WriteTo(w)
		fmt.Fprint(w, "}")
	}
	fmt.Fprintf(w, " %v\n", value)
}

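// Serve writes every accumulated metric in the Prometheus text exposition
// format; its signature matches http.HandlerFunc, so it can be registered
// directly on a mux. Counters and gauges emit one sample per group, while
// histograms emit a _bucket series plus _count and _sum, roughly of the shape
// (schematic only; <group> stands for the printed form of the group's tags):
//
//	# HELP <name> <description>
//	# TYPE <name> histogram
//	<name>_bucket{<group>,le="<bucket>"} <bucket count>
//	<name>_bucket{<group>,le="+Inf"} <total count>
//	<name>_count{<group>} <total count>
//	<name>_sum{<group>} <sum>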
func (e *Exporter) Serve(w http.ResponseWriter, r *http.Request) {
	e.mu.Lock()
	defer e.mu.Unlock()
	for _, data := range e.metrics {
		switch data := data.(type) {
		case *metric.Int64Data:
			e.header(w, data.Info.Name, data.Info.Description, data.IsGauge, false)
			for i, group := range data.Groups() {
				e.row(w, data.Info.Name, group, "", data.Rows[i])
			}

		case *metric.Float64Data:
			e.header(w, data.Info.Name, data.Info.Description, data.IsGauge, false)
			for i, group := range data.Groups() {
				e.row(w, data.Info.Name, group, "", data.Rows[i])
			}

		case *metric.HistogramInt64Data:
			e.header(w, data.Info.Name, data.Info.Description, false, true)
			for i, group := range data.Groups() {
				row := data.Rows[i]
				for j, b := range data.Info.Buckets {
					e.row(w, data.Info.Name+"_bucket", group, fmt.Sprintf(`le="%v"`, b), row.Values[j])
				}
				e.row(w, data.Info.Name+"_bucket", group, `le="+Inf"`, row.Count)
				e.row(w, data.Info.Name+"_count", group, "", row.Count)
				e.row(w, data.Info.Name+"_sum", group, "", row.Sum)
			}

		case *metric.HistogramFloat64Data:
			e.header(w, data.Info.Name, data.Info.Description, false, true)
			for i, group := range data.Groups() {
				row := data.Rows[i]
				for j, b := range data.Info.Buckets {
					e.row(w, data.Info.Name+"_bucket", group, fmt.Sprintf(`le="%v"`, b), row.Values[j])
				}
				e.row(w, data.Info.Name+"_bucket", group, `le="+Inf"`, row.Count)
				e.row(w, data.Info.Name+"_count", group, "", row.Count)
				e.row(w, data.Info.Name+"_sum", group, "", row.Sum)
			}
		}
	}
}
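
// serveMetrics is a minimal usage sketch rather than part of the exporter: it
// assumes the Exporter has already been installed in the telemetry export
// chain (so that ProcessEvent sees metric events) and simply mounts Serve at
// /metrics for a Prometheus server to scrape. The function name and the
// /metrics path are illustrative choices, not part of this package's API.
func serveMetrics(addr string, exporter *Exporter) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", exporter.Serve)
	return http.ListenAndServe(addr, mux)
}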