| 1 | // SPDX-License-Identifier: AGPL-3.0-or-later |
| 2 | |
| 3 | package api |
| 4 | |
| 5 | import ( |
| 6 | "encoding/base64" |
| 7 | "encoding/json" |
| 8 | "errors" |
| 9 | "net/http" |
| 10 | "strings" |
| 11 | |
| 12 | "github.com/go-chi/chi/v5" |
| 13 | |
| 14 | "github.com/tenseleyFlow/shithub/internal/auth/audit" |
| 15 | "github.com/tenseleyFlow/shithub/internal/auth/pat" |
| 16 | "github.com/tenseleyFlow/shithub/internal/auth/policy" |
| 17 | "github.com/tenseleyFlow/shithub/internal/auth/throttle" |
| 18 | "github.com/tenseleyFlow/shithub/internal/repos" |
| 19 | "github.com/tenseleyFlow/shithub/internal/repos/fork" |
| 20 | "github.com/tenseleyFlow/shithub/internal/repos/git" |
| 21 | reposdb "github.com/tenseleyFlow/shithub/internal/repos/sqlc" |
| 22 | "github.com/tenseleyFlow/shithub/internal/web/middleware" |
| 23 | ) |
| 24 | |
| 25 | // mountReposFollowups registers the S50 §2 follow-up endpoints that |
| 26 | // round out the repos REST surface: README fetch, topics replace + |
| 27 | // clear, and fork-sync. |
| 28 | // |
| 29 | // GET /api/v1/repos/{o}/{r}/readme[?ref=] |
| 30 | // PUT /api/v1/repos/{o}/{r}/topics |
| 31 | // DELETE /api/v1/repos/{o}/{r}/topics |
| 32 | // POST /api/v1/repos/{o}/{r}/merge-upstream |
| 33 | // |
| 34 | // Scopes: `repo:read` on the README GET, `repo:write` on the |
| 35 | // mutations. |
| 36 | func (h *Handlers) mountReposFollowups(r chi.Router) { |
| 37 | r.Group(func(r chi.Router) { |
| 38 | r.Use(middleware.RequireScope(pat.ScopeRepoRead)) |
| 39 | r.Get("/api/v1/repos/{owner}/{repo}/readme", h.repoReadmeGet) |
| 40 | }) |
| 41 | r.Group(func(r chi.Router) { |
| 42 | r.Use(middleware.RequireScope(pat.ScopeRepoWrite)) |
| 43 | r.Put("/api/v1/repos/{owner}/{repo}/topics", h.repoTopicsReplace) |
| 44 | r.Delete("/api/v1/repos/{owner}/{repo}/topics", h.repoTopicsClear) |
| 45 | r.Post("/api/v1/repos/{owner}/{repo}/merge-upstream", h.repoMergeUpstream) |
| 46 | }) |
| 47 | } |
| 48 | |
| 49 | // ─── README ───────────────────────────────────────────────────────── |
| 50 | |
// readmeResponse is the JSON payload for GET .../readme. The shape
// mirrors the familiar contents-API style: the blob is delivered
// base64-encoded with its size and a raw download link alongside.
// Today only root-level READMEs are served, so Path always equals
// Name (see repoReadmeGet).
type readmeResponse struct {
	Name string `json:"name"`
	Path string `json:"path"`
	Size int64 `json:"size"`
	Encoding string `json:"encoding"` // always "base64"
	Content string `json:"content"`
	DownloadURL string `json:"download_url"`
}

// readmeMaxBytes caps how much of the blob is read and returned.
const readmeMaxBytes = 1 << 20 // 1 MiB, matches the HTML render cap.
| 61 | |
// repoReadmeGet serves GET /api/v1/repos/{owner}/{repo}/readme.
// It resolves the requested ref (default branch when ?ref= is empty),
// picks the best README blob at the tree root, and returns it
// base64-encoded up to readmeMaxBytes.
func (h *Handlers) repoReadmeGet(w http.ResponseWriter, r *http.Request) {
	// resolveAPIRepo enforces read policy and writes its own error
	// response on failure, so a bare return is enough here.
	repo, ok := h.resolveAPIRepo(w, r, policy.ActionRepoRead)
	if !ok {
		return
	}
	gitDir, err := h.repoGitDir(r.Context(), repo)
	if err != nil {
		h.d.Logger.ErrorContext(r.Context(), "api: readme repo path", "error", err)
		writeAPIError(w, http.StatusInternalServerError, "lookup failed")
		return
	}
	// ?ref= may be a branch, tag, or commit; fall back to the repo's
	// default branch when absent.
	ref := strings.TrimSpace(r.URL.Query().Get("ref"))
	if ref == "" {
		ref = repo.DefaultBranch
	}
	// List the tree root only ("" path): nested READMEs are not
	// considered by this endpoint.
	entries, err := git.LsTree(r.Context(), gitDir, ref, "")
	if err != nil {
		if errors.Is(err, git.ErrRefNotFound) || errors.Is(err, git.ErrPathNotFound) {
			writeAPIError(w, http.StatusNotFound, "ref not found")
			return
		}
		h.d.Logger.ErrorContext(r.Context(), "api: readme ls-tree", "error", err)
		writeAPIError(w, http.StatusInternalServerError, "lookup failed")
		return
	}
	readmeName := pickREADME(entries)
	if readmeName == "" {
		writeAPIError(w, http.StatusNotFound, "no README found at ref")
		return
	}
	// ErrBlobTooLarge is deliberately tolerated: the response then
	// carries whatever ReadBlobBytes produced under the cap.
	// NOTE(review): assumes ReadBlobBytes returns the truncated
	// prefix (not nil) on ErrBlobTooLarge — confirm, otherwise the
	// too-large case silently yields an empty Content with Size 0.
	body, err := git.ReadBlobBytes(r.Context(), gitDir, ref, readmeName, readmeMaxBytes)
	if err != nil && !errors.Is(err, git.ErrBlobTooLarge) {
		h.d.Logger.ErrorContext(r.Context(), "api: readme read", "error", err)
		writeAPIError(w, http.StatusInternalServerError, "read failed")
		return
	}

	// Build an absolute raw-download link from the request's own
	// host. NOTE(review): ref and readmeName are inserted without
	// URL escaping — fine for typical branch/file names, but refs
	// containing '?', '#' or spaces would produce a broken URL.
	scheme := "http"
	if r.TLS != nil {
		scheme = "https"
	}
	downloadURL := scheme + "://" + r.Host + "/" + chi.URLParam(r, "owner") + "/" + repo.Name + "/raw/" + ref + "/" + readmeName

	writeJSON(w, http.StatusOK, readmeResponse{
		Name: readmeName,
		Path: readmeName,
		Size: int64(len(body)),
		Encoding: "base64",
		Content: base64.StdEncoding.EncodeToString(body),
		DownloadURL: downloadURL,
	})
}
| 114 | |
| 115 | // pickREADME returns the first entry whose name starts with "readme" |
| 116 | // (case-insensitive), preferring `.md` / `.markdown` over plain text |
| 117 | // to match the HTML code-view's choice when multiple README files |
| 118 | // exist at the same level (e.g. README.md + README.rst). |
| 119 | func pickREADME(entries []git.TreeEntry) string { |
| 120 | var fallback string |
| 121 | for _, e := range entries { |
| 122 | if e.Kind != git.EntryBlob { |
| 123 | continue |
| 124 | } |
| 125 | lower := strings.ToLower(e.Name) |
| 126 | if !strings.HasPrefix(lower, "readme") { |
| 127 | continue |
| 128 | } |
| 129 | if strings.HasSuffix(lower, ".md") || strings.HasSuffix(lower, ".markdown") { |
| 130 | return e.Name |
| 131 | } |
| 132 | if fallback == "" { |
| 133 | fallback = e.Name |
| 134 | } |
| 135 | } |
| 136 | return fallback |
| 137 | } |
| 138 | |
| 139 | // ─── topics ───────────────────────────────────────────────────────── |
| 140 | |
// topicsRequest is the PUT /topics body: the complete replacement set
// of topic names (validation happens in repos.ReplaceTopics).
type topicsRequest struct {
	Names []string `json:"names"`
}

// topicsResponse echoes the topic set as stored after a replace.
type topicsResponse struct {
	Names []string `json:"names"`
}
| 148 | |
| 149 | func (h *Handlers) repoTopicsReplace(w http.ResponseWriter, r *http.Request) { |
| 150 | repo, ok := h.resolveAPIRepo(w, r, policy.ActionRepoWrite) |
| 151 | if !ok { |
| 152 | return |
| 153 | } |
| 154 | var body topicsRequest |
| 155 | if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 16*1024)).Decode(&body); err != nil { |
| 156 | writeAPIError(w, http.StatusBadRequest, "invalid JSON: "+err.Error()) |
| 157 | return |
| 158 | } |
| 159 | if err := repos.ReplaceTopics(r.Context(), h.reposDeps(), repo.ID, body.Names); err != nil { |
| 160 | writeTopicsError(w, err) |
| 161 | return |
| 162 | } |
| 163 | names, err := reposdb.New().ListRepoTopics(r.Context(), h.d.Pool, repo.ID) |
| 164 | if err != nil { |
| 165 | writeAPIError(w, http.StatusInternalServerError, "reload failed") |
| 166 | return |
| 167 | } |
| 168 | writeJSON(w, http.StatusOK, topicsResponse{Names: names}) |
| 169 | } |
| 170 | |
| 171 | func (h *Handlers) repoTopicsClear(w http.ResponseWriter, r *http.Request) { |
| 172 | repo, ok := h.resolveAPIRepo(w, r, policy.ActionRepoWrite) |
| 173 | if !ok { |
| 174 | return |
| 175 | } |
| 176 | if err := repos.ReplaceTopics(r.Context(), h.reposDeps(), repo.ID, []string{}); err != nil { |
| 177 | writeTopicsError(w, err) |
| 178 | return |
| 179 | } |
| 180 | w.WriteHeader(http.StatusNoContent) |
| 181 | } |
| 182 | |
| 183 | func writeTopicsError(w http.ResponseWriter, err error) { |
| 184 | switch { |
| 185 | case errors.Is(err, repos.ErrTooManyTopics): |
| 186 | writeAPIError(w, http.StatusUnprocessableEntity, "too many topics (max 20)") |
| 187 | case errors.Is(err, repos.ErrInvalidTopic): |
| 188 | writeAPIError(w, http.StatusUnprocessableEntity, "topic must be lowercase letters/digits/hyphens, 1-50 chars") |
| 189 | default: |
| 190 | writeAPIError(w, http.StatusInternalServerError, "internal error") |
| 191 | } |
| 192 | } |
| 193 | |
| 194 | // ─── merge-upstream (fork sync) ───────────────────────────────────── |
| 195 | |
// mergeUpstreamResponse reports the outcome of a fork-sync attempt.
// Merged is false when the fork was already up to date (old and new
// OIDs equal); the OID/branch fields are omitted in that shape.
type mergeUpstreamResponse struct {
	Merged bool `json:"merged"`
	OldOID string `json:"old_oid,omitempty"`
	NewOID string `json:"new_oid,omitempty"`
	BaseBranch string `json:"base_branch,omitempty"` // the fork's default branch
	Message string `json:"message"`
}
| 203 | |
| 204 | func (h *Handlers) repoMergeUpstream(w http.ResponseWriter, r *http.Request) { |
| 205 | repo, ok := h.resolveAPIRepo(w, r, policy.ActionRepoWrite) |
| 206 | if !ok { |
| 207 | return |
| 208 | } |
| 209 | if !repo.ForkOfRepoID.Valid { |
| 210 | writeAPIError(w, http.StatusUnprocessableEntity, "not a fork") |
| 211 | return |
| 212 | } |
| 213 | auth := middleware.PATAuthFromContext(r.Context()) |
| 214 | res, err := fork.Sync(r.Context(), h.forkDeps(), auth.UserID, repo.ID) |
| 215 | if err != nil { |
| 216 | writeForkSyncError(w, err) |
| 217 | return |
| 218 | } |
| 219 | writeJSON(w, http.StatusOK, mergeUpstreamResponse{ |
| 220 | Merged: res.OldOID != res.NewOID, |
| 221 | OldOID: res.OldOID, |
| 222 | NewOID: res.NewOID, |
| 223 | BaseBranch: repo.DefaultBranch, |
| 224 | Message: "fast-forwarded to upstream", |
| 225 | }) |
| 226 | } |
| 227 | |
| 228 | func writeForkSyncError(w http.ResponseWriter, err error) { |
| 229 | switch { |
| 230 | case errors.Is(err, fork.ErrSyncUpToDate): |
| 231 | // gh treats up-to-date as a 200 with merged=false; we surface |
| 232 | // the same shape so client logic doesn't have to special-case. |
| 233 | writeJSON(w, http.StatusOK, mergeUpstreamResponse{ |
| 234 | Merged: false, |
| 235 | Message: "already up to date", |
| 236 | }) |
| 237 | case errors.Is(err, fork.ErrSyncDiverged): |
| 238 | writeAPIError(w, http.StatusConflict, "fork has diverged from upstream; sync via your client") |
| 239 | case errors.Is(err, fork.ErrSyncRefRaced): |
| 240 | writeAPIError(w, http.StatusConflict, "ref changed concurrently; retry") |
| 241 | case errors.Is(err, fork.ErrSyncDefaultsMissing): |
| 242 | writeAPIError(w, http.StatusUnprocessableEntity, "source or fork default branch is empty") |
| 243 | case errors.Is(err, fork.ErrForkNotInitialized): |
| 244 | writeAPIError(w, http.StatusConflict, "fork is still being prepared; retry shortly") |
| 245 | default: |
| 246 | writeAPIError(w, http.StatusInternalServerError, "internal error") |
| 247 | } |
| 248 | } |
| 249 | |
| 250 | // reposDeps builds a repos.Deps from the handler's Deps. The topics |
| 251 | // orchestrator only needs Pool today; we pass the full set so future |
| 252 | // hooks (audit, throttle) plug in without re-touching call sites. |
| 253 | func (h *Handlers) reposDeps() repos.Deps { |
| 254 | a := h.d.Audit |
| 255 | if a == nil { |
| 256 | a = audit.NewRecorder() |
| 257 | } |
| 258 | lim := h.d.Throttle |
| 259 | if lim == nil { |
| 260 | lim = throttle.NewLimiter() |
| 261 | } |
| 262 | return repos.Deps{ |
| 263 | Pool: h.d.Pool, |
| 264 | RepoFS: h.d.RepoFS, |
| 265 | Audit: a, |
| 266 | Limiter: lim, |
| 267 | } |
| 268 | } |
| 269 | |
| 270 | // forkDeps builds a fork.Deps. The fork package needs RepoFS to |
| 271 | // resolve the on-disk bare repo for ref updates. |
| 272 | func (h *Handlers) forkDeps() fork.Deps { |
| 273 | a := h.d.Audit |
| 274 | if a == nil { |
| 275 | a = audit.NewRecorder() |
| 276 | } |
| 277 | return fork.Deps{ |
| 278 | Pool: h.d.Pool, |
| 279 | RepoFS: h.d.RepoFS, |
| 280 | Audit: a, |
| 281 | Logger: h.d.Logger, |
| 282 | } |
| 283 | } |
| 284 |