@@ -5,6 +5,7 @@ package trigger_test |
| 5 | 5 | import ( |
| 6 | 6 | "context" |
| 7 | 7 | "errors" |
| 8 | + "fmt" |
| 8 | 9 | "io" |
| 9 | 10 | "log/slog" |
| 10 | 11 | "strings" |
@@ -64,7 +65,7 @@ func setupEnq(t *testing.T) enqFx { |
| 64 | 65 | // steps. Used by every enqueue test. |
| 65 | 66 | func fixtureWorkflow(t *testing.T) *workflow.Workflow { |
| 66 | 67 | t.Helper() |
| 67 | | - src := []byte(`name: ci |
| 68 | + return workflowFromYAML(t, `name: ci |
| 68 | 69 | on: push |
| 69 | 70 | jobs: |
| 70 | 71 | build: |
@@ -73,7 +74,27 @@ jobs: |
| 73 | 74 | - uses: actions/checkout@v4 |
| 74 | 75 | - run: echo hello |
| 75 | 76 | `) |
| 76 | | - w, diags, err := workflow.Parse(src) |
| 77 | +} |
| 78 | + |
| 79 | +func concurrencyWorkflow(t *testing.T, group string, cancelInProgress bool) *workflow.Workflow { |
| 80 | + t.Helper() |
| 81 | + return workflowFromYAML(t, fmt.Sprintf(`name: ci |
| 82 | +on: push |
| 83 | +concurrency: |
| 84 | +  group: %q
| 85 | + cancel-in-progress: %t |
| 86 | +jobs: |
| 87 | + build: |
| 88 | + runs-on: ubuntu-latest |
| 89 | + steps: |
| 90 | + - uses: actions/checkout@v4 |
| 91 | + - run: echo hello |
| 92 | +`, group, cancelInProgress)) |
| 93 | +} |
| 94 | + |
| 95 | +func workflowFromYAML(t *testing.T, src string) *workflow.Workflow { |
| 96 | + t.Helper() |
| 97 | + w, diags, err := workflow.Parse([]byte(src)) |
| 77 | 98 | if err != nil { |
| 78 | 99 | t.Fatalf("parse fixture: %v", err) |
| 79 | 100 | } |
@@ -124,6 +145,174 @@ func TestEnqueue_HappyPath(t *testing.T) { |
| 124 | 145 | } |
| 125 | 146 | } |
| 126 | 147 | |
| 148 | +func TestEnqueue_ResolvesConcurrencyGroupExpression(t *testing.T) { |
| 149 | + f := setupEnq(t) |
| 150 | + ctx := context.Background() |
| 151 | + res, err := trigger.Enqueue(ctx, f.deps, trigger.EnqueueParams{ |
| 152 | + RepoID: f.repoID, |
| 153 | + WorkflowFile: ".shithub/workflows/ci.yml", |
| 154 | + HeadSHA: strings.Repeat("a", 40), |
| 155 | + HeadRef: "refs/heads/feature", |
| 156 | + EventKind: trigger.EventPush, |
| 157 | + EventPayload: map[string]any{"ref": "refs/heads/feature"}, |
| 158 | + ActorUserID: f.userID, |
| 159 | + TriggerEventID: "push:concurrency-expr", |
| 160 | + Workflow: concurrencyWorkflow(t, "branch-${{ shithub.ref }}", false), |
| 161 | + }) |
| 162 | + if err != nil { |
| 163 | + t.Fatalf("Enqueue: %v", err) |
| 164 | + } |
| 165 | + run, err := actionsdb.New().GetWorkflowRunByID(ctx, f.pool, res.RunID) |
| 166 | + if err != nil { |
| 167 | + t.Fatalf("GetWorkflowRunByID: %v", err) |
| 168 | + } |
| 169 | + if run.ConcurrencyGroup != "branch-refs/heads/feature" { |
| 170 | + t.Fatalf("concurrency_group: got %q, want %q", run.ConcurrencyGroup, "branch-refs/heads/feature")
| 171 | + } |
| 172 | +} |
| 173 | + |
| 174 | +func TestEnqueue_CancelInProgressCancelsOlderQueuedRun(t *testing.T) { |
| 175 | + f := setupEnq(t) |
| 176 | + ctx := context.Background() |
| 177 | + q := actionsdb.New() |
| 178 | + first, err := trigger.Enqueue(ctx, f.deps, trigger.EnqueueParams{ |
| 179 | + RepoID: f.repoID, |
| 180 | + WorkflowFile: ".shithub/workflows/ci.yml", |
| 181 | + HeadSHA: strings.Repeat("a", 40), |
| 182 | + HeadRef: "refs/heads/trunk", |
| 183 | + EventKind: trigger.EventPush, |
| 184 | + EventPayload: map[string]any{"ref": "refs/heads/trunk"}, |
| 185 | + ActorUserID: f.userID, |
| 186 | + TriggerEventID: "push:concurrency-cancel-1", |
| 187 | + Workflow: concurrencyWorkflow(t, "${{ shithub.ref }}", false), |
| 188 | + }) |
| 189 | + if err != nil { |
| 190 | + t.Fatalf("first Enqueue: %v", err) |
| 191 | + } |
| 192 | + second, err := trigger.Enqueue(ctx, f.deps, trigger.EnqueueParams{ |
| 193 | + RepoID: f.repoID, |
| 194 | + WorkflowFile: ".shithub/workflows/ci.yml", |
| 195 | + HeadSHA: strings.Repeat("b", 40), |
| 196 | + HeadRef: "refs/heads/trunk", |
| 197 | + EventKind: trigger.EventPush, |
| 198 | + EventPayload: map[string]any{"ref": "refs/heads/trunk"}, |
| 199 | + ActorUserID: f.userID, |
| 200 | + TriggerEventID: "push:concurrency-cancel-2", |
| 201 | + Workflow: concurrencyWorkflow(t, "${{ shithub.ref }}", true), |
| 202 | + }) |
| 203 | + if err != nil { |
| 204 | + t.Fatalf("second Enqueue: %v", err) |
| 205 | + } |
| 206 | + oldRun, err := q.GetWorkflowRunByID(ctx, f.pool, first.RunID) |
| 207 | + if err != nil { |
| 208 | + t.Fatalf("GetWorkflowRunByID old: %v", err) |
| 209 | + } |
| 210 | + if oldRun.Status != actionsdb.WorkflowRunStatusCompleted || |
| 211 | + !oldRun.Conclusion.Valid || |
| 212 | + oldRun.Conclusion.CheckConclusion != actionsdb.CheckConclusionCancelled { |
| 213 | + t.Fatalf("old run not cancelled: %+v", oldRun) |
| 214 | + } |
| 215 | + oldJobs, err := q.ListJobsForRun(ctx, f.pool, first.RunID) |
| 216 | + if err != nil { |
| 217 | + t.Fatalf("ListJobsForRun old: %v", err) |
| 218 | + } |
| 219 | + if len(oldJobs) != 1 || oldJobs[0].Status != actionsdb.WorkflowJobStatusCancelled { |
| 220 | + t.Fatalf("old jobs not cancelled: %+v", oldJobs) |
| 221 | + } |
| 222 | + oldSteps, err := q.ListStepsForJob(ctx, f.pool, oldJobs[0].ID) |
| 223 | + if err != nil { |
| 224 | + t.Fatalf("ListStepsForJob old: %v", err) |
| 225 | + } |
| 226 | + for _, step := range oldSteps { |
| 227 | + if step.Status != actionsdb.WorkflowStepStatusCancelled { |
| 228 | + t.Fatalf("step %d status: got %s want cancelled", step.ID, step.Status) |
| 229 | + } |
| 230 | + } |
| 231 | + newRun, err := q.GetWorkflowRunByID(ctx, f.pool, second.RunID) |
| 232 | + if err != nil { |
| 233 | + t.Fatalf("GetWorkflowRunByID new: %v", err) |
| 234 | + } |
| 235 | + if newRun.Status != actionsdb.WorkflowRunStatusQueued { |
| 236 | + t.Fatalf("new run status: got %s want queued", newRun.Status) |
| 237 | + } |
| 238 | +} |
| 239 | + |
| 240 | +func TestClaimQueuedWorkflowJob_BlocksYoungerConcurrencyRun(t *testing.T) { |
| 241 | + f := setupEnq(t) |
| 242 | + ctx := context.Background() |
| 243 | + q := actionsdb.New() |
| 244 | + first, err := trigger.Enqueue(ctx, f.deps, trigger.EnqueueParams{ |
| 245 | + RepoID: f.repoID, |
| 246 | + WorkflowFile: ".shithub/workflows/ci.yml", |
| 247 | + HeadSHA: strings.Repeat("c", 40), |
| 248 | + HeadRef: "refs/heads/trunk", |
| 249 | + EventKind: trigger.EventPush, |
| 250 | + EventPayload: map[string]any{"ref": "refs/heads/trunk"}, |
| 251 | + ActorUserID: f.userID, |
| 252 | + TriggerEventID: "push:concurrency-block-1", |
| 253 | + Workflow: concurrencyWorkflow(t, "${{ shithub.ref }}", false), |
| 254 | + }) |
| 255 | + if err != nil { |
| 256 | + t.Fatalf("first Enqueue: %v", err) |
| 257 | + } |
| 258 | + second, err := trigger.Enqueue(ctx, f.deps, trigger.EnqueueParams{ |
| 259 | + RepoID: f.repoID, |
| 260 | + WorkflowFile: ".shithub/workflows/ci.yml", |
| 261 | + HeadSHA: strings.Repeat("d", 40), |
| 262 | + HeadRef: "refs/heads/trunk", |
| 263 | + EventKind: trigger.EventPush, |
| 264 | + EventPayload: map[string]any{"ref": "refs/heads/trunk"}, |
| 265 | + ActorUserID: f.userID, |
| 266 | + TriggerEventID: "push:concurrency-block-2", |
| 267 | + Workflow: concurrencyWorkflow(t, "${{ shithub.ref }}", false), |
| 268 | + }) |
| 269 | + if err != nil { |
| 270 | + t.Fatalf("second Enqueue: %v", err) |
| 271 | + } |
| 272 | + runner, err := q.InsertRunner(ctx, f.pool, actionsdb.InsertRunnerParams{ |
| 273 | + Name: "runner-block", |
| 274 | + Labels: []string{"ubuntu-latest"}, |
| 275 | + Capacity: 2, // > 1 so the failed claim below is due to concurrency, not capacity
| 276 | + }) |
| 277 | + if err != nil { |
| 278 | + t.Fatalf("InsertRunner: %v", err) |
| 279 | + } |
| 280 | + claimed, err := q.ClaimQueuedWorkflowJob(ctx, f.pool, actionsdb.ClaimQueuedWorkflowJobParams{ |
| 281 | + Labels: []string{"ubuntu-latest"}, |
| 282 | + RunnerID: runner.ID, |
| 283 | + }) |
| 284 | + if err != nil { |
| 285 | + t.Fatalf("first ClaimQueuedWorkflowJob: %v", err) |
| 286 | + } |
| 287 | + if claimed.RunID != first.RunID { |
| 288 | + t.Fatalf("claimed run: got %d want first run %d", claimed.RunID, first.RunID) |
| 289 | + } |
| 290 | + _, err = q.ClaimQueuedWorkflowJob(ctx, f.pool, actionsdb.ClaimQueuedWorkflowJobParams{ |
| 291 | + Labels: []string{"ubuntu-latest"}, |
| 292 | + RunnerID: runner.ID, |
| 293 | + }) |
| 294 | + if !errors.Is(err, pgx.ErrNoRows) { |
| 295 | + t.Fatalf("second claim error: got %v want pgx.ErrNoRows", err) |
| 296 | + } |
| 297 | + changed, err := q.RequestWorkflowRunCancel(ctx, f.pool, first.RunID) |
| 298 | + if err != nil { |
| 299 | + t.Fatalf("RequestWorkflowRunCancel: %v", err) |
| 300 | + } |
| 301 | + if len(changed) != 1 || !changed[0].CancelRequested { |
| 302 | + t.Fatalf("cancel request did not release blocker: %+v", changed) |
| 303 | + } |
| 304 | + released, err := q.ClaimQueuedWorkflowJob(ctx, f.pool, actionsdb.ClaimQueuedWorkflowJobParams{ |
| 305 | + Labels: []string{"ubuntu-latest"}, |
| 306 | + RunnerID: runner.ID, |
| 307 | + }) |
| 308 | + if err != nil { |
| 309 | + t.Fatalf("claim after cancel request: %v", err) |
| 310 | + } |
| 311 | + if released.RunID != second.RunID { |
| 312 | + t.Fatalf("released claim run: got %d want second run %d", released.RunID, second.RunID) |
| 313 | + } |
| 314 | +} |
| 315 | + |
| 127 | 316 | func TestEnqueue_IdempotentSecondCall(t *testing.T) { |
| 128 | 317 | f := setupEnq(t) |
| 129 | 318 | ctx := context.Background() |