Add auth for metrics, remove unwanted metrics, and fix tests.

This commit is contained in:
2026-03-02 22:30:15 +01:00
parent 6ca3fb8701
commit 8c9cd96213
5 changed files with 41 additions and 43 deletions

View File

@@ -110,13 +110,14 @@ func (h *Handler) handleHealth(w http.ResponseWriter, r *http.Request) {
}
func (h *Handler) handleMetrics(w http.ResponseWriter, r *http.Request) {
payload := metrics.Default.RenderPrometheus()
w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8")
w.Header().Set("Content-Length", strconv.Itoa(len(payload)))
w.WriteHeader(http.StatusOK)
if r.Method == http.MethodHead {
w.WriteHeader(http.StatusOK)
return
}
payload := metrics.Default.RenderPrometheus()
w.Header().Set("Content-Length", strconv.Itoa(len(payload)))
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(payload))
}
@@ -645,7 +646,6 @@ func newLimitedListener(inner net.Listener, maxConns int) net.Listener {
return inner
}
metrics.Default.SetConnectionPoolMax(maxConns)
metrics.Default.SetWorkerPoolSize(maxConns)
return &limitedListener{
Listener: inner,
slots: make(chan struct{}, maxConns),

View File

@@ -25,7 +25,7 @@ func Middleware(
return
}
if r.URL.Path == "/healthz" || r.URL.Path == "/metrics" {
if r.URL.Path == "/healthz" {
metrics.Default.ObserveAuth("bypass", "none", "public_endpoint")
next.ServeHTTP(w, r.WithContext(WithRequestContext(r.Context(), authCtx)))
return

View File

@@ -90,10 +90,8 @@ func HTTPMiddleware(logger *slog.Logger, cfg Config) func(http.Handler) http.Han
ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
op := metricOperationLabel(r)
metrics.Default.IncHTTPInFlightOp(op)
metrics.Default.IncWorkerPoolActive()
defer func() {
metrics.Default.DecHTTPInFlightOp(op)
metrics.Default.DecWorkerPoolActive()
}()
requestID := middleware.GetReqID(r.Context())
if requestID != "" {

View File

@@ -75,8 +75,6 @@ type Registry struct {
connectionPoolWaits atomic.Uint64
requestQueueLength atomic.Int64
workerPoolActive atomic.Int64
workerPoolSize atomic.Int64
mu sync.Mutex
@@ -364,21 +362,6 @@ func (r *Registry) DecRequestQueueLength() {
r.requestQueueLength.Add(-1)
}
func (r *Registry) SetWorkerPoolSize(size int) {
if size < 0 {
size = 0
}
r.workerPoolSize.Store(int64(size))
}
func (r *Registry) IncWorkerPoolActive() {
r.workerPoolActive.Add(1)
}
func (r *Registry) DecWorkerPoolActive() {
r.workerPoolActive.Add(-1)
}
func (r *Registry) ObserveLockWait(lockName string, d time.Duration) {
lockName = strings.TrimSpace(lockName)
if lockName == "" {
@@ -520,10 +503,7 @@ func (r *Registry) RenderPrometheus() string {
connectionMax := float64(r.connectionPoolMax.Load())
connectionWaits := r.connectionPoolWaits.Load()
queueLength := float64(r.requestQueueLength.Load())
workerActive := float64(r.workerPoolActive.Load())
workerSize := float64(r.workerPoolSize.Load())
openFDs, hasOpenFDs := readOpenFDs()
resident, hasResident := readResidentMemoryBytes()
cpuSeconds, hasCPU := readProcessCPUSeconds()
@@ -555,8 +535,6 @@ func (r *Registry) RenderPrometheus() string {
writeCounter(&b, "fs_connection_pool_waits_total", "Number of waits due to pool saturation.", connectionWaits)
writeGauge(&b, "fs_request_queue_length", "Requests waiting for an execution slot.", queueLength)
writeGauge(&b, "fs_worker_pool_active", "Active workers.", workerActive)
writeGauge(&b, "fs_worker_pool_size", "Configured worker pool size.", workerSize)
writeHistogramVecKV(&b, "fs_lock_wait_seconds", "Time spent waiting for locks.", lockWait, []string{"lock_name"})
writeHistogramVecKV(&b, "fs_lock_hold_seconds", "Time locks were held.", lockHold, []string{"lock_name"})
@@ -594,10 +572,6 @@ func (r *Registry) RenderPrometheus() string {
if hasResident {
writeGauge(&b, "process_resident_memory_bytes", "Resident memory size in bytes.", resident)
}
if hasOpenFDs {
writeGauge(&b, "process_open_fds", "Number of open file descriptors.", openFDs)
writeGauge(&b, "fs_open_fds", "Number of open file descriptors.", openFDs)
}
return b.String()
}
@@ -732,8 +706,16 @@ func writeHistogramWithLabelsMap(b *strings.Builder, name string, labels map[str
}
fmt.Fprintf(b, "%s_bucket{%s} %d\n", name, labelsToString(bucketLabels), cumulative)
}
fmt.Fprintf(b, "%s_sum{%s} %.9f\n", name, labelsToString(labels), s.sum)
fmt.Fprintf(b, "%s_count{%s} %d\n", name, labelsToString(labels), s.count)
labelsSuffix := formatLabelsSuffix(labels)
fmt.Fprintf(b, "%s_sum%s %.9f\n", name, labelsSuffix, s.sum)
fmt.Fprintf(b, "%s_count%s %d\n", name, labelsSuffix, s.count)
}
// formatLabelsSuffix renders a Prometheus label set as a "{k=v,...}" suffix
// for a metric line. When the label map is empty it returns "" so callers
// emit `name value` rather than `name{} value` with an empty label set.
func formatLabelsSuffix(labels map[string]string) string {
	if len(labels) == 0 {
		return ""
	}
	return "{" + labelsToString(labels) + "}"
}
func formatLabels(keys, values []string) string {
@@ -786,14 +768,6 @@ func escapeLabelValue(value string) string {
return value
}
// readOpenFDs returns the number of open file descriptors for the current
// process, counted as the entries of /proc/self/fd. The boolean result is
// false when the directory cannot be read (e.g. on platforms without procfs),
// letting callers omit the gauge instead of reporting a bogus value.
func readOpenFDs() (float64, bool) {
	entries, err := os.ReadDir("/proc/self/fd")
	if err != nil {
		return 0, false
	}
	return float64(len(entries)), true
}
func readResidentMemoryBytes() (float64, bool) {
data, err := os.ReadFile("/proc/self/statm")
if err != nil {

26
metrics/metrics_test.go Normal file
View File

@@ -0,0 +1,26 @@
package metrics
import (
"strings"
"testing"
)
// TestRenderPrometheusHistogramNoEmptyLabelSet checks that histograms
// recorded without any labels (batch size, GC duration) render their
// _sum and _count lines without an empty "{}" label set.
func TestRenderPrometheusHistogramNoEmptyLabelSet(t *testing.T) {
	reg := NewRegistry()
	// Record one unlabeled sample into each histogram under test.
	reg.ObserveBatchSize(3)
	reg.ObserveGC(0, 0, 0, 0, true)
	out := reg.RenderPrometheus()
	if strings.Contains(out, "fs_batch_size_histogram_sum{}") {
		t.Fatalf("unexpected empty label set for batch sum metric")
	}
	if strings.Contains(out, "fs_batch_size_histogram_count{}") {
		t.Fatalf("unexpected empty label set for batch count metric")
	}
	if strings.Contains(out, "fs_gc_duration_seconds_sum{}") {
		t.Fatalf("unexpected empty label set for gc sum metric")
	}
	if strings.Contains(out, "fs_gc_duration_seconds_count{}") {
		t.Fatalf("unexpected empty label set for gc count metric")
	}
}