@@ -0,0 +1,195 @@ |
| | 1 | +// SPDX-License-Identifier: AGPL-3.0-or-later |
| | 2 | + |
| | 3 | +package api |
| | 4 | + |
| | 5 | +import ( |
| | 6 | + "context" |
| | 7 | + "net/http" |
| | 8 | + "strconv" |
| | 9 | + "strings" |
| | 10 | + "time" |
| | 11 | + |
| | 12 | + "github.com/go-chi/chi/v5" |
| | 13 | + "github.com/jackc/pgx/v5/pgtype" |
| | 14 | + |
| | 15 | + actionsdb "github.com/tenseleyFlow/shithub/internal/actions/sqlc" |
| | 16 | + "github.com/tenseleyFlow/shithub/internal/auth/pat" |
| | 17 | + "github.com/tenseleyFlow/shithub/internal/auth/policy" |
| | 18 | + "github.com/tenseleyFlow/shithub/internal/web/handlers/api/apipage" |
| | 19 | + "github.com/tenseleyFlow/shithub/internal/web/middleware" |
| | 20 | +) |
| | 21 | + |
| | 22 | +// mountActionsCaches registers the S50 §13 caches REST surface. |
| | 23 | +// |
| | 24 | +// GET /api/v1/repos/{o}/{r}/actions/caches[?key=&ref=&page=&per_page=] |
| | 25 | +// DELETE /api/v1/repos/{o}/{r}/actions/caches?key=...[&ref=...] |
| | 26 | +// DELETE /api/v1/repos/{o}/{r}/actions/caches/{cache_id} |
| | 27 | +// |
| | 28 | +// Scopes: `repo:read` on the list, `repo:write` on the deletes. |
| | 29 | +// |
| | 30 | +// The runner-side upload protocol that populates this table is its |
| | 31 | +// own future sprint. This REST surface lands first so operators have |
| | 32 | +// observability and can purge stale entries by id or key, even |
| | 33 | +// before any cache rows exist. |
| | 34 | +func (h *Handlers) mountActionsCaches(r chi.Router) { |
| | 35 | + r.Group(func(r chi.Router) { |
| | 36 | + r.Use(middleware.RequireScope(pat.ScopeRepoRead)) |
| | 37 | + r.Get("/api/v1/repos/{owner}/{repo}/actions/caches", h.actionsCachesList) |
| | 38 | + }) |
| | 39 | + r.Group(func(r chi.Router) { |
| | 40 | + r.Use(middleware.RequireScope(pat.ScopeRepoWrite)) |
| | 41 | + r.Delete("/api/v1/repos/{owner}/{repo}/actions/caches", h.actionsCachesDeleteByKey) |
| | 42 | + r.Delete("/api/v1/repos/{owner}/{repo}/actions/caches/{cache_id}", h.actionsCacheDeleteByID) |
| | 43 | + }) |
| | 44 | +} |
| | 45 | + |
// cacheResponse is the JSON wire shape for one workflow cache entry
// as returned by the caches list endpoint. Both timestamp fields are
// pre-formatted strings (RFC 3339, UTC — see presentCache) rather
// than time.Time, so the wire format is fixed here, not at encode time.
type cacheResponse struct {
	ID             int64  `json:"id"`
	Key            string `json:"key"`            // user-supplied cache key
	Version        string `json:"version"`        // cache version discriminator
	Ref            string `json:"ref"`            // git ref the cache was written from
	SizeBytes      int64  `json:"size_bytes"`     // stored tarball size
	LastAccessedAt string `json:"last_accessed_at"`
	CreatedAt      string `json:"created_at"`
}
| | 55 | + |
| | 56 | +func presentCache(row actionsdb.WorkflowCache) cacheResponse { |
| | 57 | + return cacheResponse{ |
| | 58 | + ID: row.ID, |
| | 59 | + Key: row.CacheKey, |
| | 60 | + Version: row.CacheVersion, |
| | 61 | + Ref: row.GitRef, |
| | 62 | + SizeBytes: row.SizeBytes, |
| | 63 | + LastAccessedAt: row.LastAccessedAt.Time.UTC().Format(time.RFC3339), |
| | 64 | + CreatedAt: row.CreatedAt.Time.UTC().Format(time.RFC3339), |
| | 65 | + } |
| | 66 | +} |
| | 67 | + |
| | 68 | +func (h *Handlers) actionsCachesList(w http.ResponseWriter, r *http.Request) { |
| | 69 | + repo, ok := h.resolveAPIRepo(w, r, policy.ActionRepoRead) |
| | 70 | + if !ok { |
| | 71 | + return |
| | 72 | + } |
| | 73 | + page, perPage := apipage.ParseQuery(r, apipage.DefaultPerPage, apipage.MaxPerPage) |
| | 74 | + q := actionsdb.New() |
| | 75 | + ref := pgTextOrNull(r.URL.Query().Get("ref")) |
| | 76 | + key := pgTextOrNull(r.URL.Query().Get("key")) |
| | 77 | + total, err := q.CountWorkflowCachesForRepo(r.Context(), h.d.Pool, actionsdb.CountWorkflowCachesForRepoParams{ |
| | 78 | + RepoID: repo.ID, GitRef: ref, CacheKey: key, |
| | 79 | + }) |
| | 80 | + if err != nil { |
| | 81 | + h.d.Logger.ErrorContext(r.Context(), "api: count caches", "error", err) |
| | 82 | + writeAPIError(w, http.StatusInternalServerError, "list failed") |
| | 83 | + return |
| | 84 | + } |
| | 85 | + rows, err := q.ListWorkflowCachesForRepo(r.Context(), h.d.Pool, actionsdb.ListWorkflowCachesForRepoParams{ |
| | 86 | + RepoID: repo.ID, |
| | 87 | + Limit: int32(perPage), |
| | 88 | + Offset: int32((page - 1) * perPage), |
| | 89 | + GitRef: ref, |
| | 90 | + CacheKey: key, |
| | 91 | + }) |
| | 92 | + if err != nil { |
| | 93 | + h.d.Logger.ErrorContext(r.Context(), "api: list caches", "error", err) |
| | 94 | + writeAPIError(w, http.StatusInternalServerError, "list failed") |
| | 95 | + return |
| | 96 | + } |
| | 97 | + out := make([]cacheResponse, 0, len(rows)) |
| | 98 | + for _, row := range rows { |
| | 99 | + out = append(out, presentCache(row)) |
| | 100 | + } |
| | 101 | + link := apipage.Page{Current: page, PerPage: perPage, Total: int(total)}.LinkHeader(h.d.BaseURL, sanitizedURL(r)) |
| | 102 | + if link != "" { |
| | 103 | + w.Header().Set("Link", link) |
| | 104 | + } |
| | 105 | + writeJSON(w, http.StatusOK, map[string]any{ |
| | 106 | + "total_count": total, |
| | 107 | + "actions_caches": out, |
| | 108 | + }) |
| | 109 | +} |
| | 110 | + |
| | 111 | +func (h *Handlers) actionsCacheDeleteByID(w http.ResponseWriter, r *http.Request) { |
| | 112 | + repo, ok := h.resolveAPIRepo(w, r, policy.ActionRepoWrite) |
| | 113 | + if !ok { |
| | 114 | + return |
| | 115 | + } |
| | 116 | + cacheID, err := strconv.ParseInt(chi.URLParam(r, "cache_id"), 10, 64) |
| | 117 | + if err != nil { |
| | 118 | + writeAPIError(w, http.StatusNotFound, "cache not found") |
| | 119 | + return |
| | 120 | + } |
| | 121 | + q := actionsdb.New() |
| | 122 | + row, err := q.GetWorkflowCacheByID(r.Context(), h.d.Pool, cacheID) |
| | 123 | + if err != nil || row.RepoID != repo.ID { |
| | 124 | + writeAPIError(w, http.StatusNotFound, "cache not found") |
| | 125 | + return |
| | 126 | + } |
| | 127 | + n, err := q.DeleteWorkflowCacheByID(r.Context(), h.d.Pool, actionsdb.DeleteWorkflowCacheByIDParams{ |
| | 128 | + ID: cacheID, RepoID: repo.ID, |
| | 129 | + }) |
| | 130 | + if err != nil { |
| | 131 | + h.d.Logger.ErrorContext(r.Context(), "api: delete cache", "error", err) |
| | 132 | + writeAPIError(w, http.StatusInternalServerError, "delete failed") |
| | 133 | + return |
| | 134 | + } |
| | 135 | + if n == 0 { |
| | 136 | + writeAPIError(w, http.StatusNotFound, "cache not found") |
| | 137 | + return |
| | 138 | + } |
| | 139 | + if h.d.ObjectStore != nil { |
| | 140 | + go h.purgeCacheObjects(context.WithoutCancel(r.Context()), []string{row.ObjectKey}) |
| | 141 | + } |
| | 142 | + w.WriteHeader(http.StatusNoContent) |
| | 143 | +} |
| | 144 | + |
| | 145 | +func (h *Handlers) actionsCachesDeleteByKey(w http.ResponseWriter, r *http.Request) { |
| | 146 | + repo, ok := h.resolveAPIRepo(w, r, policy.ActionRepoWrite) |
| | 147 | + if !ok { |
| | 148 | + return |
| | 149 | + } |
| | 150 | + key := strings.TrimSpace(r.URL.Query().Get("key")) |
| | 151 | + if key == "" { |
| | 152 | + writeAPIError(w, http.StatusBadRequest, "key query parameter required") |
| | 153 | + return |
| | 154 | + } |
| | 155 | + ref := pgTextOrNull(r.URL.Query().Get("ref")) |
| | 156 | + objectKeys, err := actionsdb.New().DeleteWorkflowCachesByKey(r.Context(), h.d.Pool, actionsdb.DeleteWorkflowCachesByKeyParams{ |
| | 157 | + RepoID: repo.ID, |
| | 158 | + CacheKey: key, |
| | 159 | + GitRef: ref, |
| | 160 | + }) |
| | 161 | + if err != nil { |
| | 162 | + h.d.Logger.ErrorContext(r.Context(), "api: delete caches by key", "error", err) |
| | 163 | + writeAPIError(w, http.StatusInternalServerError, "delete failed") |
| | 164 | + return |
| | 165 | + } |
| | 166 | + if h.d.ObjectStore != nil && len(objectKeys) > 0 { |
| | 167 | + go h.purgeCacheObjects(context.WithoutCancel(r.Context()), objectKeys) |
| | 168 | + } |
| | 169 | + w.WriteHeader(http.StatusNoContent) |
| | 170 | +} |
| | 171 | + |
| | 172 | +// purgeCacheObjects mirrors purgeArtifactObjects but for cache |
| | 173 | +// tarballs. Detached from the request so the response returns even |
| | 174 | +// if the object-store deletes are slow; failures fall back to the |
| | 175 | +// eventual eviction sweeper (future sprint). |
| | 176 | +func (h *Handlers) purgeCacheObjects(parent context.Context, keys []string) { |
| | 177 | + ctx, cancel := context.WithTimeout(parent, 30*time.Second) |
| | 178 | + defer cancel() |
| | 179 | + for _, k := range keys { |
| | 180 | + if err := h.d.ObjectStore.Delete(ctx, k); err != nil { |
| | 181 | + h.d.Logger.Warn("api: purge cache object", "key", k, "error", err) |
| | 182 | + } |
| | 183 | + } |
| | 184 | +} |
| | 185 | + |
| | 186 | +// pgTextOrNull builds a pgtype.Text whose Valid bit follows whether |
| | 187 | +// the trimmed input is empty. Maps "" → NULL parameter so the SQL |
| | 188 | +// "is null OR filter equals" pattern degenerates to "no filter". |
| | 189 | +func pgTextOrNull(s string) pgtype.Text { |
| | 190 | + s = strings.TrimSpace(s) |
| | 191 | + if s == "" { |
| | 192 | + return pgtype.Text{} |
| | 193 | + } |
| | 194 | + return pgtype.Text{String: s, Valid: true} |
| | 195 | +} |