// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package prometheus

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"sort"
	"sync"

	"golang.org/x/tools/internal/telemetry/event"
	"golang.org/x/tools/internal/telemetry/metric"
)
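
// New returns an Exporter that renders the metric data it receives as a
// Prometheus /metrics page.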
func New() *Exporter {
	return &Exporter{}
}
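
// An Exporter holds the metric data it has been sent, ready to be served in
// the Prometheus text exposition format.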
type Exporter struct {
	mu      sync.Mutex
	metrics []event.MetricData
}
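
// Metric records the latest data for a metric, keeping e.metrics sorted by
// metric name so the served page is stable between requests.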
func (e *Exporter) Metric(ctx context.Context, data event.MetricData) {
	e.mu.Lock()
	defer e.mu.Unlock()
	name := data.Handle()
	// We keep the metrics in name sorted order so the page is stable and easy
	// to read. We do this with an insertion sort rather than sorting the list
	// each time.
	index := sort.Search(len(e.metrics), func(i int) bool {
		return e.metrics[i].Handle() >= name
	})
	if index >= len(e.metrics) || e.metrics[index].Handle() != name {
		// We have a new metric, so we need to make space for it.
		old := e.metrics
		e.metrics = make([]event.MetricData, len(old)+1)
		copy(e.metrics, old[:index])
		copy(e.metrics[index+1:], old[index:])
	}
	e.metrics[index] = data
}
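
// header writes the # HELP and # TYPE comment lines that introduce a metric
// in the Prometheus text exposition format.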
func (e *Exporter) header(w http.ResponseWriter, name, description string, isGauge, isHistogram bool) {
	kind := "counter"
	if isGauge {
		kind = "gauge"
	}
	if isHistogram {
		kind = "histogram"
	}
	fmt.Fprintf(w, "# HELP %s %s\n", name, description)
	fmt.Fprintf(w, "# TYPE %s %s\n", name, kind)
}
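
// row writes a single sample line: the metric name, the group's tags plus
// any extra label (such as a histogram bucket bound) as Prometheus labels,
// and the value, producing a line of the shape
//
//	name{labels} value
//
// where the exact label text comes from how event.TagList formats itself.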
func (e *Exporter) row(w http.ResponseWriter, name string, group event.TagList, extra string, value interface{}) {
	fmt.Fprint(w, name)
	buf := &bytes.Buffer{}
	fmt.Fprint(buf, group)
	if extra != "" {
		if buf.Len() > 0 {
			fmt.Fprint(buf, ",")
		}
		fmt.Fprint(buf, extra)
	}
	if buf.Len() > 0 {
		fmt.Fprint(w, "{")
		buf.WriteTo(w)
		fmt.Fprint(w, "}")
	}
	fmt.Fprintf(w, " %v\n", value)
}
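
// Serve writes every collected metric in the Prometheus text exposition
// format, so an Exporter can be registered directly as an HTTP handler.
// A minimal wiring sketch (the route name is only an example; hooking the
// Exporter into the telemetry export pipeline happens elsewhere):
//
//	exporter := New()
//	http.HandleFunc("/metrics", exporter.Serve)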
func (e *Exporter) Serve(w http.ResponseWriter, r *http.Request) {
	e.mu.Lock()
	defer e.mu.Unlock()
	for _, data := range e.metrics {
		switch data := data.(type) {
		case *metric.Int64Data:
			e.header(w, data.Info.Name, data.Info.Description, data.IsGauge, false)
			for i, group := range data.Groups() {
				e.row(w, data.Info.Name, group, "", data.Rows[i])
			}

		case *metric.Float64Data:
			e.header(w, data.Info.Name, data.Info.Description, data.IsGauge, false)
			for i, group := range data.Groups() {
				e.row(w, data.Info.Name, group, "", data.Rows[i])
			}

		case *metric.HistogramInt64Data:
			e.header(w, data.Info.Name, data.Info.Description, false, true)
			for i, group := range data.Groups() {
				row := data.Rows[i]
				for j, b := range data.Info.Buckets {
					e.row(w, data.Info.Name+"_bucket", group, fmt.Sprintf(`le="%v"`, b), row.Values[j])
				}
				e.row(w, data.Info.Name+"_bucket", group, `le="+Inf"`, row.Count)
				e.row(w, data.Info.Name+"_count", group, "", row.Count)
				e.row(w, data.Info.Name+"_sum", group, "", row.Sum)
			}

		case *metric.HistogramFloat64Data:
			e.header(w, data.Info.Name, data.Info.Description, false, true)
			for i, group := range data.Groups() {
				row := data.Rows[i]
				for j, b := range data.Info.Buckets {
					e.row(w, data.Info.Name+"_bucket", group, fmt.Sprintf(`le="%v"`, b), row.Values[j])
				}
				e.row(w, data.Info.Name+"_bucket", group, `le="+Inf"`, row.Count)
				e.row(w, data.Info.Name+"_count", group, "", row.Count)
				e.row(w, data.Info.Name+"_sum", group, "", row.Sum)
			}
		}
	}
}