@@ -0,0 +1,292 @@ |
| 1 | +// SPDX-License-Identifier: AGPL-3.0-or-later |
| 2 | + |
| 3 | +package jobs_test |
| 4 | + |
| 5 | +import ( |
| 6 | + "context" |
| 7 | + "encoding/json" |
| 8 | + "log/slog" |
| 9 | + "os" |
| 10 | + "strings" |
| 11 | + "testing" |
| 12 | + "time" |
| 13 | + |
| 14 | + "github.com/jackc/pgx/v5/pgtype" |
| 15 | + |
| 16 | + "github.com/tenseleyFlow/shithub/internal/auth/audit" |
| 17 | + "github.com/tenseleyFlow/shithub/internal/auth/throttle" |
| 18 | + "github.com/tenseleyFlow/shithub/internal/infra/storage" |
| 19 | + "github.com/tenseleyFlow/shithub/internal/repos" |
| 20 | + repogit "github.com/tenseleyFlow/shithub/internal/repos/git" |
| 21 | + reposdb "github.com/tenseleyFlow/shithub/internal/repos/sqlc" |
| 22 | + "github.com/tenseleyFlow/shithub/internal/testing/dbtest" |
| 23 | + usersdb "github.com/tenseleyFlow/shithub/internal/users/sqlc" |
| 24 | + "github.com/tenseleyFlow/shithub/internal/worker/jobs" |
| 25 | + workerdb "github.com/tenseleyFlow/shithub/internal/worker/sqlc" |
| 26 | +) |
| 27 | + |
// fixtureHash is a syntactically valid argon2id hash with all-"A" salt
// and digest. Tests only need CreateUser to accept the value; no test
// here ever verifies a password against it.
const fixtureHash = "$argon2id$v=19$m=16384,t=1,p=1$" +
	"AAAAAAAAAAAAAAAA$" +
	"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
| 31 | + |
| 32 | +// TestPushProcess_HappyPath exercises the full push:process pipeline |
| 33 | +// against real Postgres + real bare repo. Verifies that the handler: |
| 34 | +// - sets repos.default_branch_oid when the ref is the default branch, |
| 35 | +// - inserts a webhook_events_pending row, |
| 36 | +// - marks the push_event processed_at, |
| 37 | +// - enqueues a follow-up repo:size_recalc job. |
| 38 | +func TestPushProcess_HappyPath(t *testing.T) { |
| 39 | + t.Parallel() |
| 40 | + pool := dbtest.NewTestDB(t) |
| 41 | + root := t.TempDir() |
| 42 | + rfs, err := storage.NewRepoFS(root) |
| 43 | + if err != nil { |
| 44 | + t.Fatalf("NewRepoFS: %v", err) |
| 45 | + } |
| 46 | + |
| 47 | + // User + verified email so repos.Create accepts a templated initial |
| 48 | + // commit (the create path needs author identity for plumbing). |
| 49 | + uq := usersdb.New() |
| 50 | + user, err := uq.CreateUser(context.Background(), pool, usersdb.CreateUserParams{ |
| 51 | + Username: "alice", DisplayName: "alice", PasswordHash: fixtureHash, |
| 52 | + }) |
| 53 | + if err != nil { |
| 54 | + t.Fatalf("CreateUser: %v", err) |
| 55 | + } |
| 56 | + em, err := uq.CreateUserEmail(context.Background(), pool, usersdb.CreateUserEmailParams{ |
| 57 | + UserID: user.ID, Email: "alice@example.com", IsPrimary: true, Verified: true, |
| 58 | + }) |
| 59 | + if err != nil { |
| 60 | + t.Fatalf("CreateUserEmail: %v", err) |
| 61 | + } |
| 62 | + _ = uq.LinkUserPrimaryEmail(context.Background(), pool, usersdb.LinkUserPrimaryEmailParams{ |
| 63 | + ID: user.ID, PrimaryEmailID: pgtype.Int8{Int64: em.ID, Valid: true}, |
| 64 | + }) |
| 65 | + |
| 66 | + res, err := repos.Create(context.Background(), repos.Deps{ |
| 67 | + Pool: pool, RepoFS: rfs, Audit: audit.NewRecorder(), Limiter: throttle.NewLimiter(), |
| 68 | + }, repos.Params{ |
| 69 | + OwnerUserID: user.ID, OwnerUsername: "alice", |
| 70 | + Name: "demo", Visibility: "public", InitReadme: true, |
| 71 | + }) |
| 72 | + if err != nil { |
| 73 | + t.Fatalf("repos.Create: %v", err) |
| 74 | + } |
| 75 | + |
| 76 | + // Insert a push_event covering the initial commit on refs/heads/trunk. |
| 77 | + wq := workerdb.New() |
| 78 | + event, err := wq.InsertPushEvent(context.Background(), pool, workerdb.InsertPushEventParams{ |
| 79 | + RepoID: res.Repo.ID, |
| 80 | + BeforeSha: strings.Repeat("0", 40), |
| 81 | + AfterSha: res.InitialCommitOID, |
| 82 | + Ref: "refs/heads/trunk", |
| 83 | + Protocol: "ssh", |
| 84 | + PusherUserID: pgtype.Int8{Int64: user.ID, Valid: true}, |
| 85 | + RequestID: pgtype.Text{String: "test-req", Valid: true}, |
| 86 | + }) |
| 87 | + if err != nil { |
| 88 | + t.Fatalf("InsertPushEvent: %v", err) |
| 89 | + } |
| 90 | + |
| 91 | + // Run the handler directly. |
| 92 | + logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})) |
| 93 | + handler := jobs.PushProcess(jobs.PushProcessDeps{Pool: pool, RepoFS: rfs, Logger: logger}) |
| 94 | + payload, _ := json.Marshal(jobs.PushProcessPayload{PushEventID: event.ID}) |
| 95 | + if err := handler(context.Background(), payload); err != nil { |
| 96 | + t.Fatalf("push:process: %v", err) |
| 97 | + } |
| 98 | + |
| 99 | + // Default branch OID should now match the initial commit. |
| 100 | + rq := reposdb.New() |
| 101 | + repo, err := rq.GetRepoByID(context.Background(), pool, res.Repo.ID) |
| 102 | + if err != nil { |
| 103 | + t.Fatalf("GetRepoByID: %v", err) |
| 104 | + } |
| 105 | + if !repo.DefaultBranchOid.Valid || repo.DefaultBranchOid.String != res.InitialCommitOID { |
| 106 | + t.Errorf("default_branch_oid = %v, want %q", repo.DefaultBranchOid, res.InitialCommitOID) |
| 107 | + } |
| 108 | + |
| 109 | + // Push event marked processed. |
| 110 | + got, err := wq.GetPushEvent(context.Background(), pool, event.ID) |
| 111 | + if err != nil { |
| 112 | + t.Fatalf("GetPushEvent: %v", err) |
| 113 | + } |
| 114 | + if !got.ProcessedAt.Valid { |
| 115 | + t.Errorf("processed_at not set") |
| 116 | + } |
| 117 | + |
| 118 | + // Webhook event row exists. |
| 119 | + var webhookCount int |
| 120 | + row := pool.QueryRow(context.Background(), |
| 121 | + `SELECT count(*) FROM webhook_events_pending WHERE repo_id = $1 AND event_kind = 'push'`, |
| 122 | + res.Repo.ID) |
| 123 | + if err := row.Scan(&webhookCount); err != nil { |
| 124 | + t.Fatalf("count webhook_events_pending: %v", err) |
| 125 | + } |
| 126 | + if webhookCount != 1 { |
| 127 | + t.Errorf("webhook_events_pending count = %d, want 1", webhookCount) |
| 128 | + } |
| 129 | + |
| 130 | + // repo:size_recalc job enqueued. |
| 131 | + var sizeJobCount int |
| 132 | + row = pool.QueryRow(context.Background(), |
| 133 | + `SELECT count(*) FROM jobs WHERE kind = 'repo:size_recalc' AND completed_at IS NULL AND failed_at IS NULL`) |
| 134 | + _ = row.Scan(&sizeJobCount) |
| 135 | + if sizeJobCount < 1 { |
| 136 | + t.Errorf("repo:size_recalc not enqueued (count=%d)", sizeJobCount) |
| 137 | + } |
| 138 | + |
| 139 | + // Sanity: re-running is a no-op (idempotent on processed_at). |
| 140 | + if err := handler(context.Background(), payload); err != nil { |
| 141 | + t.Fatalf("re-run: %v", err) |
| 142 | + } |
| 143 | +} |
| 144 | + |
| 145 | +// TestRepoSizeRecalc_UpdatesDiskUsedBytes drives the size recalc end-to- |
| 146 | +// end against a real repo and verifies disk_used_bytes is non-zero. |
| 147 | +func TestRepoSizeRecalc_UpdatesDiskUsedBytes(t *testing.T) { |
| 148 | + t.Parallel() |
| 149 | + pool := dbtest.NewTestDB(t) |
| 150 | + root := t.TempDir() |
| 151 | + rfs, err := storage.NewRepoFS(root) |
| 152 | + if err != nil { |
| 153 | + t.Fatalf("NewRepoFS: %v", err) |
| 154 | + } |
| 155 | + |
| 156 | + uq := usersdb.New() |
| 157 | + user, _ := uq.CreateUser(context.Background(), pool, usersdb.CreateUserParams{ |
| 158 | + Username: "bob", DisplayName: "bob", PasswordHash: fixtureHash, |
| 159 | + }) |
| 160 | + em, _ := uq.CreateUserEmail(context.Background(), pool, usersdb.CreateUserEmailParams{ |
| 161 | + UserID: user.ID, Email: "bob@example.com", IsPrimary: true, Verified: true, |
| 162 | + }) |
| 163 | + _ = uq.LinkUserPrimaryEmail(context.Background(), pool, usersdb.LinkUserPrimaryEmailParams{ |
| 164 | + ID: user.ID, PrimaryEmailID: pgtype.Int8{Int64: em.ID, Valid: true}, |
| 165 | + }) |
| 166 | + res, err := repos.Create(context.Background(), repos.Deps{ |
| 167 | + Pool: pool, RepoFS: rfs, Audit: audit.NewRecorder(), Limiter: throttle.NewLimiter(), |
| 168 | + }, repos.Params{ |
| 169 | + OwnerUserID: user.ID, OwnerUsername: "bob", |
| 170 | + Name: "demo", Visibility: "public", InitReadme: true, |
| 171 | + }) |
| 172 | + if err != nil { |
| 173 | + t.Fatalf("repos.Create: %v", err) |
| 174 | + } |
| 175 | + |
| 176 | + logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})) |
| 177 | + handler := jobs.RepoSizeRecalc(jobs.RepoSizeRecalcDeps{Pool: pool, RepoFS: rfs, Logger: logger}) |
| 178 | + payload, _ := json.Marshal(jobs.RepoSizeRecalcPayload{RepoID: res.Repo.ID}) |
| 179 | + if err := handler(context.Background(), payload); err != nil { |
| 180 | + t.Fatalf("repo:size_recalc: %v", err) |
| 181 | + } |
| 182 | + |
| 183 | + rq := reposdb.New() |
| 184 | + repo, _ := rq.GetRepoByID(context.Background(), pool, res.Repo.ID) |
| 185 | + if repo.DiskUsedBytes <= 0 { |
| 186 | + t.Errorf("disk_used_bytes = %d, want > 0", repo.DiskUsedBytes) |
| 187 | + } |
| 188 | +} |
| 189 | + |
| 190 | +// TestPushProcess_BranchNotDefault: a push to refs/heads/feat shouldn't |
| 191 | +// overwrite default_branch_oid on a repo whose default_branch is trunk. |
| 192 | +func TestPushProcess_BranchNotDefault(t *testing.T) { |
| 193 | + t.Parallel() |
| 194 | + pool := dbtest.NewTestDB(t) |
| 195 | + root := t.TempDir() |
| 196 | + rfs, _ := storage.NewRepoFS(root) |
| 197 | + uq := usersdb.New() |
| 198 | + user, _ := uq.CreateUser(context.Background(), pool, usersdb.CreateUserParams{ |
| 199 | + Username: "carol", DisplayName: "carol", PasswordHash: fixtureHash, |
| 200 | + }) |
| 201 | + em, _ := uq.CreateUserEmail(context.Background(), pool, usersdb.CreateUserEmailParams{ |
| 202 | + UserID: user.ID, Email: "carol@example.com", IsPrimary: true, Verified: true, |
| 203 | + }) |
| 204 | + _ = uq.LinkUserPrimaryEmail(context.Background(), pool, usersdb.LinkUserPrimaryEmailParams{ |
| 205 | + ID: user.ID, PrimaryEmailID: pgtype.Int8{Int64: em.ID, Valid: true}, |
| 206 | + }) |
| 207 | + res, _ := repos.Create(context.Background(), repos.Deps{ |
| 208 | + Pool: pool, RepoFS: rfs, Audit: audit.NewRecorder(), Limiter: throttle.NewLimiter(), |
| 209 | + }, repos.Params{ |
| 210 | + OwnerUserID: user.ID, OwnerUsername: "carol", |
| 211 | + Name: "demo", Visibility: "public", InitReadme: true, |
| 212 | + }) |
| 213 | + |
| 214 | + wq := workerdb.New() |
| 215 | + event, err := wq.InsertPushEvent(context.Background(), pool, workerdb.InsertPushEventParams{ |
| 216 | + RepoID: res.Repo.ID, |
| 217 | + BeforeSha: strings.Repeat("0", 40), |
| 218 | + AfterSha: "deadbeef" + strings.Repeat("0", 32), |
| 219 | + Ref: "refs/heads/feat", |
| 220 | + Protocol: "ssh", |
| 221 | + PusherUserID: pgtype.Int8{Int64: user.ID, Valid: true}, |
| 222 | + }) |
| 223 | + if err != nil { |
| 224 | + t.Fatalf("InsertPushEvent: %v", err) |
| 225 | + } |
| 226 | + logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})) |
| 227 | + handler := jobs.PushProcess(jobs.PushProcessDeps{Pool: pool, RepoFS: rfs, Logger: logger}) |
| 228 | + payload, _ := json.Marshal(jobs.PushProcessPayload{PushEventID: event.ID}) |
| 229 | + if err := handler(context.Background(), payload); err != nil { |
| 230 | + t.Fatalf("push:process: %v", err) |
| 231 | + } |
| 232 | + rq := reposdb.New() |
| 233 | + repo, _ := rq.GetRepoByID(context.Background(), pool, res.Repo.ID) |
| 234 | + if repo.DefaultBranchOid.Valid { |
| 235 | + t.Errorf("default_branch_oid set to %q for non-default ref", repo.DefaultBranchOid.String) |
| 236 | + } |
| 237 | +} |
| 238 | + |
| 239 | +// Sanity check that the package's core helpers don't break under nil |
| 240 | +// payload (defensive — production hooks always send populated payloads). |
| 241 | +func TestPushProcess_RejectsBadPayload(t *testing.T) { |
| 242 | + t.Parallel() |
| 243 | + pool := dbtest.NewTestDB(t) |
| 244 | + rfs, _ := storage.NewRepoFS(t.TempDir()) |
| 245 | + logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})) |
| 246 | + handler := jobs.PushProcess(jobs.PushProcessDeps{Pool: pool, RepoFS: rfs, Logger: logger}) |
| 247 | + |
| 248 | + // Empty payload → poison. |
| 249 | + if err := handler(context.Background(), json.RawMessage(`{}`)); err == nil { |
| 250 | + t.Errorf("empty payload: want error, got nil") |
| 251 | + } |
| 252 | + // Reference unknown event → poison. |
| 253 | + if err := handler(context.Background(), json.RawMessage(`{"push_event_id": 99999}`)); err == nil { |
| 254 | + t.Errorf("missing event: want error, got nil") |
| 255 | + } |
| 256 | +} |
| 257 | + |
| 258 | +// Belt + braces: when InitReadme=true the initial commit has a real OID |
| 259 | +// matching the on-disk HEAD, which validates our test fixtures match |
| 260 | +// reality. |
| 261 | +func TestRepoFixture_HeadMatchesInitialCommit(t *testing.T) { |
| 262 | + t.Parallel() |
| 263 | + pool := dbtest.NewTestDB(t) |
| 264 | + root := t.TempDir() |
| 265 | + rfs, _ := storage.NewRepoFS(root) |
| 266 | + uq := usersdb.New() |
| 267 | + user, _ := uq.CreateUser(context.Background(), pool, usersdb.CreateUserParams{ |
| 268 | + Username: "dave", DisplayName: "dave", PasswordHash: fixtureHash, |
| 269 | + }) |
| 270 | + em, _ := uq.CreateUserEmail(context.Background(), pool, usersdb.CreateUserEmailParams{ |
| 271 | + UserID: user.ID, Email: "dave@example.com", IsPrimary: true, Verified: true, |
| 272 | + }) |
| 273 | + _ = uq.LinkUserPrimaryEmail(context.Background(), pool, usersdb.LinkUserPrimaryEmailParams{ |
| 274 | + ID: user.ID, PrimaryEmailID: pgtype.Int8{Int64: em.ID, Valid: true}, |
| 275 | + }) |
| 276 | + res, _ := repos.Create(context.Background(), repos.Deps{ |
| 277 | + Pool: pool, RepoFS: rfs, Audit: audit.NewRecorder(), Limiter: throttle.NewLimiter(), |
| 278 | + }, repos.Params{ |
| 279 | + OwnerUserID: user.ID, OwnerUsername: "dave", |
| 280 | + Name: "demo", Visibility: "public", InitReadme: true, |
| 281 | + }) |
| 282 | + gitDir, _ := rfs.RepoPath("dave", "demo") |
| 283 | + head, found, err := repogit.HeadOf(context.Background(), gitDir, "trunk") |
| 284 | + if err != nil || !found { |
| 285 | + t.Fatalf("HeadOf trunk: found=%v err=%v", found, err) |
| 286 | + } |
| 287 | + if head.OID != res.InitialCommitOID { |
| 288 | + t.Errorf("HeadOf.OID = %q, want %q", head.OID, res.InitialCommitOID) |
| 289 | + } |
| 290 | + // brevity check so the linter is happy with imports. |
| 291 | + _ = time.Second |
| 292 | +} |