// Managed by Ansible. River config for Grafana Alloy.
//
// Two scrape jobs:
//   - node_exporter (host: CPU, mem, disk, network, fs, etc.)
//   - shithubd /metrics (app: req latency, DB pool, panics, jobs)
//
// Both push to Grafana Cloud Mimir via the prometheus.remote_write
// component below. The basic-auth username/password come from
// /etc/alloy/credentials.env via EnvironmentFile= on the unit.
//
// Why this shape (not separate prometheus + grafana on the droplet):
//   - One binary, one config, one systemd unit.
//   - No inbound 9090 to firewall — push-only.
//   - Free-tier Grafana Cloud handles ~10k active series, way over
//     what one shithubd droplet emits.

prometheus.remote_write "grafana_cloud" {
  endpoint {
    // Mimir push endpoint and credentials are injected via the
    // systemd unit's EnvironmentFile=; sys.env reads them at load.
    url = sys.env("GRAFANA_CLOUD_PROM_URL")

    basic_auth {
      username = sys.env("GRAFANA_CLOUD_PROM_USER")
      password = sys.env("GRAFANA_CLOUD_PROM_TOKEN")
    }
  }

  // Stamp every pushed series with the droplet identity so a single
  // Grafana Cloud stack can distinguish hosts/environments. The
  // instance value is a Jinja placeholder rendered by Ansible at
  // deploy time, before Alloy ever reads this file.
  //
  // NOTE(review): a previous comment here claimed this drops
  // high-cardinality go_* labels — external_labels only ADDS labels;
  // dropping would need a write_relabel_config block inside endpoint.
  // Confirm whether that rule was lost, and add it if series count
  // approaches the free-tier limit.
  external_labels = {
    instance = "{{ ansible_hostname }}",
    cluster  = "shithub-prod",
  }
}

// ── node_exporter ───────────────────────────────────────────────
prometheus.scrape "node_exporter" {
  targets = [
    { __address__ = "127.0.0.1:9100", job = "node" },
  ]
  forward_to      = [prometheus.remote_write.grafana_cloud.receiver]
  scrape_interval = "30s"
  scrape_timeout  = "10s"
}

// ── shithubd /metrics ───────────────────────────────────────────
// enable_compression = false: shithubd's HTTP middleware auto-gzips
// when the client advertises Accept-Encoding: gzip and correctly sets
// Content-Encoding: gzip on the response. Alloy 1.16's Prometheus
// scraper mis-handles that path: it parses the raw 0x1f gzip magic
// byte as text and the scrape fails with up=0. Disabling Accept-
// Encoding negotiation forces shithubd to return plain text. The
// /metrics payload is small enough that wire-size cost is irrelevant.
// Revisit once Alloy fixes the gzip-aware text parser upstream.
prometheus.scrape "shithubd" {
  targets = [
    { __address__ = "127.0.0.1:8080", job = "shithubd", __metrics_path__ = "/metrics" },
  ]
  forward_to         = [prometheus.remote_write.grafana_cloud.receiver]
  scrape_interval    = "15s"
  scrape_timeout     = "10s"
  enable_compression = false
}