@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+package git
+
+import (
+	"context"
+	"strconv"
+
+	"github.com/tenseleyFlow/shithub/internal/cache/lru"
+)
+
+// AheadBehindKey is the (repo_id, base_oid, head_oid) tuple. The
+// OIDs make the cache safe across pushes: when either ref moves, the
+// new OID forms a different key. Old keys age out via LRU eviction;
+// explicit invalidation isn't strictly required, but it helps keep
+// the working set small (push handlers call InvalidateAheadBehind).
+type AheadBehindKey struct {
+	RepoID  int64
+	BaseOID string
+	HeadOID string
+}
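+
+// Illustration (hypothetical, shortened OIDs): before a push the key
+// might be {42, "ab12", "cd34"}; after the head ref moves, lookups
+// use {42, "ab12", "ef56"} instead, so the stale entry is never read
+// again and simply ages out.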
+
+// AheadBehindResult is the cached payload.
+type AheadBehindResult struct {
+	Ahead, Behind int
+}
+
+// abGroup is the process-global single-flight cache. 4096 entries ≈
+// a few hundred repos with active branch sets; bounded enough that
+// the cache itself never dominates RSS, large enough to survive the
+// branch-list page's burst of (default vs N branches) lookups.
+var abGroup = lru.NewGroup(
+	lru.New[AheadBehindKey, AheadBehindResult](4096),
+	abKeyer,
+)
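+
+// Rough arithmetic behind the 4096 figure (assumed workload, for
+// illustration): ~400 repos × ~10 active branches, each compared
+// against the default branch, is ~4,000 live keys, comfortably
+// inside capacity, so one branch-list burst can't evict its own
+// working set.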
+
+func abKeyer(k AheadBehindKey) string {
+	return strconv.FormatInt(k.RepoID, 10) + "|" + k.BaseOID + "|" + k.HeadOID
+}
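+
+// For example, abKeyer(AheadBehindKey{RepoID: 42, BaseOID: "ab12",
+// HeadOID: "cd34"}) returns "42|ab12|cd34" (real OIDs are full hex
+// hashes; the short forms here are illustrative only).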
+
+// AheadBehindCached is the cached + single-flighted variant of
+// AheadBehind. Callers pass the resolved OIDs (not ref names) so the
+// key is stable across ref-name renames and the cache is never
+// poisoned by a stale ref pointer.
+//
+// On cache miss the underlying `git rev-list` runs once even when
+// many requests arrive concurrently for the same key (single-flight
+// dogpile guard). The result is cached until LRU eviction or an
+// explicit InvalidateAheadBehind.
+func AheadBehindCached(ctx context.Context, gitDir string, key AheadBehindKey) (AheadBehindResult, error) {
+	return abGroup.Do(ctx, key, func(ctx context.Context) (AheadBehindResult, error) {
+		ahead, behind, err := AheadBehind(ctx, gitDir, key.BaseOID, key.HeadOID)
+		if err != nil {
+			return AheadBehindResult{}, err
+		}
+		return AheadBehindResult{Ahead: ahead, Behind: behind}, nil
+	})
+}
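+
+// Hypothetical caller sketch (resolveOID and repo are assumptions
+// for illustration, not part of this file): a branch-list handler
+// resolves ref names to OIDs first, then asks the cache:
+//
+//	baseOID, _ := resolveOID(ctx, gitDir, "refs/heads/main")
+//	headOID, _ := resolveOID(ctx, gitDir, "refs/heads/feature")
+//	res, err := AheadBehindCached(ctx, gitDir, AheadBehindKey{
+//		RepoID:  repo.ID,
+//		BaseOID: baseOID,
+//		HeadOID: headOID,
+//	})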
+
+// InvalidateAheadBehindForRepo is intended to drop every cached
+// entry whose key matches repoID; push:process calls it after a
+// force-push that rewrites history. Stale counts can't be served
+// either way (the key embeds both OIDs), so this only frees dead
+// entries early.
+//
+// The current implementation is approximate: the LRU exposes Delete
+// per key, not a by-prefix scan. Push handlers that know the
+// specific OIDs that moved should call InvalidateAheadBehind
+// directly; this helper is kept as a documented extension point so
+// the API stays stable when a richer scan lands (see the sketch
+// following this function).
+func InvalidateAheadBehindForRepo(repoID int64) {
+	// Intentionally a no-op for now — see comment above. The LRU
+	// pressure is bounded by capacity, so stale entries age out
+	// naturally; correctness is preserved because the cache key
+	// includes the OID.
+	_ = repoID
+}
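+
+// The richer scan might look like the sketch below, assuming the LRU
+// someday grows a hypothetical RangeKeys primitive (it does not
+// exist today):
+//
+//	abGroup.RangeKeys(func(k AheadBehindKey) bool {
+//		if k.RepoID == repoID {
+//			abGroup.Invalidate(k)
+//		}
+//		return true // continue scanning
+//	})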
+
+// InvalidateAheadBehind drops one specific (repo, base, head) entry.
+// Use from push:process when the exact OIDs are known.
+func InvalidateAheadBehind(key AheadBehindKey) { abGroup.Invalidate(key) }
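+
+// Sketch of a push:process call site (repoID, defaultOID, and
+// oldHeadOID are assumptions for illustration): dropping the moved
+// branch's entry against the default branch frees it ahead of LRU
+// eviction:
+//
+//	InvalidateAheadBehind(AheadBehindKey{
+//		RepoID:  repoID,
+//		BaseOID: defaultOID,
+//		HeadOID: oldHeadOID,
+//	})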
+
+// AheadBehindCacheStats exposes hit/miss counters for the /metrics
+// surface and bench reports.
+func AheadBehindCacheStats() lru.Stats { return abGroup.Stats() }