@@ -4,21 +4,27 @@ package api |
| 4 | | 4 | |
| 5 | import ( | 5 | import ( |
| 6 | "context" | 6 | "context" |
| | 7 | + "encoding/base64" |
| 7 | "encoding/json" | 8 | "encoding/json" |
| 8 | "errors" | 9 | "errors" |
| 9 | "fmt" | 10 | "fmt" |
| 10 | "io" | 11 | "io" |
| 11 | "net/http" | 12 | "net/http" |
| | 13 | + "regexp" |
| | 14 | + "strconv" |
| 12 | "strings" | 15 | "strings" |
| 13 | "time" | 16 | "time" |
| 14 | | 17 | |
| 15 | "github.com/go-chi/chi/v5" | 18 | "github.com/go-chi/chi/v5" |
| 16 | "github.com/jackc/pgx/v5" | 19 | "github.com/jackc/pgx/v5" |
| | 20 | + "github.com/jackc/pgx/v5/pgtype" |
| 17 | | 21 | |
| 18 | "github.com/tenseleyFlow/shithub/internal/actions/runnerlabels" | 22 | "github.com/tenseleyFlow/shithub/internal/actions/runnerlabels" |
| 19 | "github.com/tenseleyFlow/shithub/internal/actions/runnertoken" | 23 | "github.com/tenseleyFlow/shithub/internal/actions/runnertoken" |
| 20 | actionsdb "github.com/tenseleyFlow/shithub/internal/actions/sqlc" | 24 | actionsdb "github.com/tenseleyFlow/shithub/internal/actions/sqlc" |
| 21 | "github.com/tenseleyFlow/shithub/internal/auth/runnerjwt" | 25 | "github.com/tenseleyFlow/shithub/internal/auth/runnerjwt" |
| | 26 | + "github.com/tenseleyFlow/shithub/internal/checks" |
| | 27 | + checksdb "github.com/tenseleyFlow/shithub/internal/checks/sqlc" |
| 22 | "github.com/tenseleyFlow/shithub/internal/ratelimit" | 28 | "github.com/tenseleyFlow/shithub/internal/ratelimit" |
| 23 | ) | 29 | ) |
| 24 | | 30 | |
@@ -30,6 +36,10 @@ var runnerHeartbeatLimit = ratelimit.Policy{ |
| 30 | | 36 | |
// mountRunners registers the runner-facing API routes: the heartbeat endpoint
// plus the per-job endpoints (logs, status, artifact upload, cancel polling).
// The per-job handlers all authenticate via authenticateRunnerJob using
// single-use job tokens.
func (h *Handlers) mountRunners(r chi.Router) {
	r.Post("/api/v1/runners/heartbeat", h.runnerHeartbeat)
	r.Post("/api/v1/jobs/{id}/logs", h.runnerJobLogs)
	r.Post("/api/v1/jobs/{id}/status", h.runnerJobStatus)
	r.Post("/api/v1/jobs/{id}/artifacts/upload", h.runnerJobArtifactUpload)
	r.Post("/api/v1/jobs/{id}/cancel-check", h.runnerJobCancelCheck)
}
| 34 | | 44 | |
| 35 | type runnerHeartbeatRequest struct { | 45 | type runnerHeartbeatRequest struct { |
@@ -226,6 +236,470 @@ func (h *Handlers) claimRunnerJob( |
| 226 | return job, steps, true, nil | 236 | return job, steps, true, nil |
| 227 | } | 237 | } |
| 228 | | 238 | |
// runnerJobAuth carries the result of a successful runner job authentication:
// the verified token claims, the runner ID extracted from them, and the
// workflow job row the token is scoped to.
type runnerJobAuth struct {
	Claims   runnerjwt.Claims
	RunnerID int64
	Job      actionsdb.WorkflowJob
}
| | 244 | + |
// authenticateRunnerJob authenticates a runner request against the job named
// in the URL path. It verifies the Bearer job JWT, checks that the token's
// job/run/runner bindings match the job row in the database, then consumes
// the token (single use — a second request with the same token is rejected
// as a replay; each successful response mints a replacement via
// writeNextTokenResponse).
//
// Token/path/job mismatches are reported as 404 rather than 403 so the
// response does not confirm the existence of jobs the caller cannot access.
// On any failure an error response has already been written and ok=false is
// returned.
func (h *Handlers) authenticateRunnerJob(w http.ResponseWriter, r *http.Request) (runnerJobAuth, bool) {
	if h.d.RunnerJWT == nil {
		writeAPIError(w, http.StatusServiceUnavailable, "runner API is not configured")
		return runnerJobAuth{}, false
	}
	pathJobID, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
	if err != nil || pathJobID <= 0 {
		writeAPIError(w, http.StatusNotFound, "job not found")
		return runnerJobAuth{}, false
	}
	const prefix = "Bearer "
	authz := r.Header.Get("Authorization")
	if !strings.HasPrefix(authz, prefix) {
		writeAPIError(w, http.StatusUnauthorized, "job token required")
		return runnerJobAuth{}, false
	}
	claims, err := h.d.RunnerJWT.Verify(strings.TrimSpace(strings.TrimPrefix(authz, prefix)))
	if err != nil {
		writeAPIError(w, http.StatusUnauthorized, "job token invalid")
		return runnerJobAuth{}, false
	}
	// The token must be scoped to exactly the job named in the path.
	if claims.JobID != pathJobID {
		writeAPIError(w, http.StatusNotFound, "job not found")
		return runnerJobAuth{}, false
	}
	runnerID, err := claims.RunnerID()
	if err != nil {
		writeAPIError(w, http.StatusUnauthorized, "job token invalid")
		return runnerJobAuth{}, false
	}
	job, err := actionsdb.New().GetWorkflowJobByID(r.Context(), h.d.Pool, pathJobID)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			writeAPIError(w, http.StatusNotFound, "job not found")
		} else {
			writeAPIError(w, http.StatusInternalServerError, "job lookup failed")
		}
		return runnerJobAuth{}, false
	}
	// The stored job must still belong to the run named in the token and be
	// assigned to the runner that holds the token.
	if job.RunID != claims.RunID || !job.RunnerID.Valid || job.RunnerID.Int64 != runnerID {
		writeAPIError(w, http.StatusNotFound, "job not found")
		return runnerJobAuth{}, false
	}
	// Mark the token as used only after all other checks have passed.
	if err := runnerjwt.Consume(r.Context(), h.d.Pool, claims); err != nil {
		if errors.Is(err, runnerjwt.ErrReplay) {
			writeAPIError(w, http.StatusUnauthorized, "job token replayed")
		} else {
			h.d.Logger.ErrorContext(r.Context(), "runner jwt consume failed", "job_id", pathJobID, "error", err)
			writeAPIError(w, http.StatusUnauthorized, "job token invalid")
		}
		return runnerJobAuth{}, false
	}
	return runnerJobAuth{Claims: claims, RunnerID: runnerID, Job: job}, true
}
| | 299 | + |
// runnerLogRequest is the JSON body for POST /api/v1/jobs/{id}/logs.
type runnerLogRequest struct {
	Seq    int32  `json:"seq"`               // non-negative chunk sequence number
	Chunk  string `json:"chunk"`             // base64-encoded log bytes (1..512KiB after decoding)
	StepID int64  `json:"step_id,omitempty"` // optional; 0 selects the job's first step
}
| | 305 | + |
// runnerJobLogs handles POST /api/v1/jobs/{id}/logs: append one log chunk to
// a step of the authenticated job. The chunk is base64 (padded or raw) and
// must decode to between 1 byte and 512KiB. Responds 202 with a replacement
// single-use token.
func (h *Handlers) runnerJobLogs(w http.ResponseWriter, r *http.Request) {
	auth, ok := h.authenticateRunnerJob(w, r)
	if !ok {
		return
	}
	var body runnerLogRequest
	if err := decodeJSONBody(r.Body, &body); err != nil {
		writeAPIError(w, http.StatusBadRequest, "invalid JSON: "+err.Error())
		return
	}
	if body.Seq < 0 {
		writeAPIError(w, http.StatusBadRequest, "seq must be non-negative")
		return
	}
	chunk, err := decodeBase64(body.Chunk)
	if err != nil {
		writeAPIError(w, http.StatusBadRequest, "chunk must be base64")
		return
	}
	// The size limit applies to the decoded bytes, not the base64 text.
	if len(chunk) == 0 || len(chunk) > 512*1024 {
		writeAPIError(w, http.StatusBadRequest, "chunk must be between 1 and 524288 bytes")
		return
	}
	stepID, ok := h.resolveLogStep(w, r, auth.Job.ID, body.StepID)
	if !ok {
		return
	}
	// pgx.ErrNoRows is tolerated so re-sending an already-stored (step, seq)
	// chunk counts as an idempotent success — presumably the insert uses
	// ON CONFLICT DO NOTHING; confirm against the AppendStepLogChunk query.
	if _, err := actionsdb.New().AppendStepLogChunk(r.Context(), h.d.Pool, actionsdb.AppendStepLogChunkParams{
		StepID: stepID,
		Seq:    body.Seq,
		Chunk:  chunk,
	}); err != nil && !errors.Is(err, pgx.ErrNoRows) {
		writeAPIError(w, http.StatusInternalServerError, "append log failed")
		return
	}
	h.writeNextTokenResponse(w, r, http.StatusAccepted, auth, map[string]any{"accepted": true})
}
| | 343 | + |
| | 344 | +func (h *Handlers) resolveLogStep(w http.ResponseWriter, r *http.Request, jobID, stepID int64) (int64, bool) { |
| | 345 | + q := actionsdb.New() |
| | 346 | + if stepID == 0 { |
| | 347 | + step, err := q.GetFirstStepForJob(r.Context(), h.d.Pool, jobID) |
| | 348 | + if err != nil { |
| | 349 | + writeAPIError(w, http.StatusNotFound, "step not found") |
| | 350 | + return 0, false |
| | 351 | + } |
| | 352 | + return step.ID, true |
| | 353 | + } |
| | 354 | + step, err := q.GetWorkflowStepByID(r.Context(), h.d.Pool, stepID) |
| | 355 | + if err != nil || step.JobID != jobID { |
| | 356 | + writeAPIError(w, http.StatusNotFound, "step not found") |
| | 357 | + return 0, false |
| | 358 | + } |
| | 359 | + return step.ID, true |
| | 360 | +} |
| | 361 | + |
// runnerStatusRequest is the JSON body for POST /api/v1/jobs/{id}/status.
// Timestamps are optional strings parsed by parseTimeOptional; defaulting
// rules live in normalizeJobStatusUpdate.
type runnerStatusRequest struct {
	Status      string `json:"status"`                 // target workflow job status (required)
	Conclusion  string `json:"conclusion,omitempty"`   // required for terminal statuses; cancelled defaults to "cancelled"
	StartedAt   string `json:"started_at,omitempty"`
	CompletedAt string `json:"completed_at,omitempty"`
}
| | 368 | + |
// runnerJobStatus handles POST /api/v1/jobs/{id}/status: validate the
// requested transition, persist it together with the derived workflow-run
// state, and mirror the result onto the job's check run (best effort — a
// check-run failure is logged, not surfaced to the runner).
//
// Non-terminal updates respond with a replacement single-use token; terminal
// updates (completed/cancelled) deliberately do not, since the runner has no
// further calls to make for this job.
func (h *Handlers) runnerJobStatus(w http.ResponseWriter, r *http.Request) {
	auth, ok := h.authenticateRunnerJob(w, r)
	if !ok {
		return
	}
	var body runnerStatusRequest
	if err := decodeJSONBody(r.Body, &body); err != nil {
		writeAPIError(w, http.StatusBadRequest, "invalid JSON: "+err.Error())
		return
	}
	update, terminal, err := normalizeJobStatusUpdate(auth.Job, body)
	if err != nil {
		writeAPIError(w, http.StatusBadRequest, err.Error())
		return
	}
	updated, runCompleted, runConclusion, err := h.applyJobStatus(r.Context(), auth.Job, update)
	if err != nil {
		writeAPIError(w, http.StatusInternalServerError, "status update failed")
		return
	}
	// Best-effort mirror onto the checks UI; the job update already committed.
	if err := h.updateCheckRunForJob(r.Context(), updated); err != nil {
		h.d.Logger.WarnContext(r.Context(), "runner check_run update failed", "job_id", updated.ID, "error", err)
	}

	bodyMap := map[string]any{
		"status":     string(updated.Status),
		"conclusion": nullableConclusion(updated.Conclusion),
	}
	if runCompleted {
		bodyMap["run_status"] = "completed"
		bodyMap["run_conclusion"] = string(runConclusion)
	}
	if terminal {
		// No next token: this job's token chain ends here.
		writeJSON(w, http.StatusOK, bodyMap)
		return
	}
	h.writeNextTokenResponse(w, r, http.StatusOK, auth, bodyMap)
}
| | 407 | + |
// normalizedJobStatusUpdate is a validated, defaults-applied job status
// change ready to be persisted by applyJobStatus.
type normalizedJobStatusUpdate struct {
	Status      actionsdb.WorkflowJobStatus
	Conclusion  actionsdb.NullCheckConclusion // set only for terminal statuses
	StartedAt   pgtype.Timestamptz
	CompletedAt pgtype.Timestamptz
}
| | 414 | + |
// normalizeJobStatusUpdate validates body against the job's current state and
// produces a defaults-applied update. It returns the normalized update,
// whether the target status is terminal (completed or cancelled), and any
// client-facing validation error.
func normalizeJobStatusUpdate(job actionsdb.WorkflowJob, body runnerStatusRequest) (normalizedJobStatusUpdate, bool, error) {
	now := time.Now().UTC()
	status := actionsdb.WorkflowJobStatus(strings.TrimSpace(body.Status))
	if status == "" {
		return normalizedJobStatusUpdate{}, false, errors.New("status is required")
	}
	if !validWorkflowJobTransition(job.Status, status) {
		return normalizedJobStatusUpdate{}, false, fmt.Errorf("invalid status transition %s -> %s", job.Status, status)
	}
	// started_at: a client-supplied value wins; otherwise keep the stored
	// value, and backfill with "now" once the job leaves queued.
	startedAt := job.StartedAt
	if body.StartedAt != "" {
		t, err := parseTimeOptional(body.StartedAt)
		if err != nil {
			return normalizedJobStatusUpdate{}, false, fmt.Errorf("started_at: %w", err)
		}
		startedAt = pgtype.Timestamptz{Time: t, Valid: !t.IsZero()}
	}
	if !startedAt.Valid && (status == actionsdb.WorkflowJobStatusRunning ||
		status == actionsdb.WorkflowJobStatusCompleted ||
		status == actionsdb.WorkflowJobStatusCancelled) {
		startedAt = pgtype.Timestamptz{Time: now, Valid: true}
	}
	// completed_at: same precedence, but only backfilled for terminal states.
	completedAt := job.CompletedAt
	terminal := status == actionsdb.WorkflowJobStatusCompleted || status == actionsdb.WorkflowJobStatusCancelled
	if body.CompletedAt != "" {
		t, err := parseTimeOptional(body.CompletedAt)
		if err != nil {
			return normalizedJobStatusUpdate{}, false, fmt.Errorf("completed_at: %w", err)
		}
		completedAt = pgtype.Timestamptz{Time: t, Valid: !t.IsZero()}
	}
	if terminal && !completedAt.Valid {
		completedAt = pgtype.Timestamptz{Time: now, Valid: true}
	}
	// Conclusion is required for terminal statuses (cancelled defaults to
	// "cancelled") and rejected for non-terminal ones.
	conclusion := actionsdb.NullCheckConclusion{}
	if terminal {
		c := strings.TrimSpace(body.Conclusion)
		if c == "" && status == actionsdb.WorkflowJobStatusCancelled {
			c = "cancelled"
		}
		if !validRunnerConclusion(c) {
			return normalizedJobStatusUpdate{}, false, errors.New("invalid or missing conclusion")
		}
		conclusion = actionsdb.NullCheckConclusion{CheckConclusion: actionsdb.CheckConclusion(c), Valid: true}
	} else if strings.TrimSpace(body.Conclusion) != "" {
		return normalizedJobStatusUpdate{}, false, errors.New("conclusion is only valid for terminal statuses")
	}
	return normalizedJobStatusUpdate{
		Status:      status,
		Conclusion:  conclusion,
		StartedAt:   startedAt,
		CompletedAt: completedAt,
	}, terminal, nil
}
| | 469 | + |
| | 470 | +func validWorkflowJobTransition(from, to actionsdb.WorkflowJobStatus) bool { |
| | 471 | + switch to { |
| | 472 | + case actionsdb.WorkflowJobStatusRunning: |
| | 473 | + return from == actionsdb.WorkflowJobStatusQueued || from == actionsdb.WorkflowJobStatusRunning |
| | 474 | + case actionsdb.WorkflowJobStatusCompleted: |
| | 475 | + return from == actionsdb.WorkflowJobStatusQueued || from == actionsdb.WorkflowJobStatusRunning || from == actionsdb.WorkflowJobStatusCompleted |
| | 476 | + case actionsdb.WorkflowJobStatusCancelled: |
| | 477 | + return from == actionsdb.WorkflowJobStatusQueued || from == actionsdb.WorkflowJobStatusRunning || from == actionsdb.WorkflowJobStatusCancelled |
| | 478 | + default: |
| | 479 | + return false |
| | 480 | + } |
| | 481 | +} |
| | 482 | + |
// applyJobStatus persists a normalized job status update and recomputes the
// parent run's state, all inside one transaction. It returns the updated job
// row plus, when every job in the run has reached a terminal status,
// complete=true and the derived run conclusion.
func (h *Handlers) applyJobStatus(
	ctx context.Context,
	job actionsdb.WorkflowJob,
	update normalizedJobStatusUpdate,
) (actionsdb.WorkflowJob, bool, actionsdb.CheckConclusion, error) {
	q := actionsdb.New()
	tx, err := h.d.Pool.Begin(ctx)
	if err != nil {
		return actionsdb.WorkflowJob{}, false, "", err
	}
	// Roll back on any early return; the flag avoids rolling back after commit.
	committed := false
	defer func() {
		if !committed {
			_ = tx.Rollback(ctx)
		}
	}()
	updated, err := q.UpdateWorkflowJobStatus(ctx, tx, actionsdb.UpdateWorkflowJobStatusParams{
		ID:          job.ID,
		Status:      update.Status,
		Conclusion:  update.Conclusion,
		StartedAt:   update.StartedAt,
		CompletedAt: update.CompletedAt,
	})
	if err != nil {
		return actionsdb.WorkflowJob{}, false, "", err
	}
	// Re-read all sibling jobs inside the transaction so the run-state
	// decision sees the update just written.
	jobs, err := q.ListJobsForRun(ctx, tx, updated.RunID)
	if err != nil {
		return actionsdb.WorkflowJob{}, false, "", err
	}
	runConclusion, complete := deriveWorkflowRunConclusion(jobs)
	if complete {
		if _, err := q.CompleteWorkflowRun(ctx, tx, actionsdb.CompleteWorkflowRunParams{
			ID:         updated.RunID,
			Conclusion: runConclusion,
		}); err != nil {
			return actionsdb.WorkflowJob{}, false, "", err
		}
	} else if err := q.MarkWorkflowRunRunning(ctx, tx, updated.RunID); err != nil {
		// Jobs are still in flight: reflect progress on the run itself.
		return actionsdb.WorkflowJob{}, false, "", err
	}
	if err := tx.Commit(ctx); err != nil {
		return actionsdb.WorkflowJob{}, false, "", err
	}
	committed = true
	return updated, complete, runConclusion, nil
}
| | 530 | + |
// deriveWorkflowRunConclusion folds per-job outcomes into a run conclusion.
// It returns (_, false) while any job is still in a non-terminal status.
// Precedence: failure/timed_out/action_required end the scan immediately;
// cancelled (either job status or conclusion) downgrades the result;
// otherwise the run is a success. A run with no jobs counts as failed.
//
// NOTE(review): a completed or skipped job whose conclusion is NULL maps to
// failure — confirm skipped jobs always carry a conclusion, otherwise a
// skipped job would fail the whole run.
func deriveWorkflowRunConclusion(jobs []actionsdb.ListJobsForRunRow) (actionsdb.CheckConclusion, bool) {
	if len(jobs) == 0 {
		return actionsdb.CheckConclusionFailure, true
	}
	worst := actionsdb.CheckConclusionSuccess
	for _, job := range jobs {
		// Any job not yet terminal means the run is still in progress.
		switch job.Status {
		case actionsdb.WorkflowJobStatusCompleted, actionsdb.WorkflowJobStatusCancelled, actionsdb.WorkflowJobStatusSkipped:
		default:
			return "", false
		}
		if job.Status == actionsdb.WorkflowJobStatusCancelled {
			worst = actionsdb.CheckConclusionCancelled
			continue
		}
		if !job.Conclusion.Valid {
			return actionsdb.CheckConclusionFailure, true
		}
		c := job.Conclusion.CheckConclusion
		// Hard failures short-circuit; they dominate any other outcome.
		if c == actionsdb.CheckConclusionFailure ||
			c == actionsdb.CheckConclusionTimedOut ||
			c == actionsdb.CheckConclusionActionRequired {
			return c, true
		}
		if c == actionsdb.CheckConclusionCancelled {
			worst = actionsdb.CheckConclusionCancelled
		}
	}
	return worst, true
}
| | 561 | + |
// updateCheckRunForJob mirrors the job's current status onto its check run.
// The check run is located by repo, head SHA, display name (falling back to
// the job key), and the "workflow_run:<run_id>:job:<job_key>" external ID —
// NOTE(review): assumed to match the format written when the check run was
// created; confirm against that producer.
//
// Only running/completed/cancelled statuses are propagated; any other status
// is a silent no-op.
func (h *Handlers) updateCheckRunForJob(ctx context.Context, job actionsdb.WorkflowJob) error {
	run, err := actionsdb.New().GetWorkflowRunByID(ctx, h.d.Pool, job.RunID)
	if err != nil {
		return err
	}
	name := job.JobName
	if name == "" {
		name = job.JobKey
	}
	checkRun, err := checksdb.New().GetCheckRunByExternalID(ctx, h.d.Pool, checksdb.GetCheckRunByExternalIDParams{
		RepoID:     run.RepoID,
		HeadSha:    run.HeadSha,
		Name:       name,
		ExternalID: pgtype.Text{String: fmt.Sprintf("workflow_run:%d:job:%s", job.RunID, job.JobKey), Valid: true},
	})
	if err != nil {
		return err
	}
	params := checks.UpdateParams{
		RunID:        checkRun.ID,
		HasStatus:    true,
		HasStartedAt: true,
		StartedAt:    timeFromPg(job.StartedAt),
	}
	switch job.Status {
	case actionsdb.WorkflowJobStatusRunning:
		params.Status = "in_progress"
	case actionsdb.WorkflowJobStatusCompleted, actionsdb.WorkflowJobStatusCancelled:
		params.Status = "completed"
		params.HasConclusion = true
		if job.Conclusion.Valid {
			params.Conclusion = string(job.Conclusion.CheckConclusion)
		} else if job.Status == actionsdb.WorkflowJobStatusCancelled {
			// A cancelled job may lack a stored conclusion; report "cancelled".
			params.Conclusion = "cancelled"
		}
		params.HasCompletedAt = true
		params.CompletedAt = timeFromPg(job.CompletedAt)
	default:
		// queued/skipped/etc.: nothing to propagate from this handler.
		return nil
	}
	_, err = checks.Update(ctx, checks.Deps{Pool: h.d.Pool, Logger: h.d.Logger}, params)
	return err
}
| | 605 | + |
// runnerArtifactUploadRequest is the JSON body for
// POST /api/v1/jobs/{id}/artifacts/upload.
type runnerArtifactUploadRequest struct {
	Name      string `json:"name"`       // artifact name; must satisfy validArtifactName
	SizeBytes int64  `json:"size_bytes"` // declared upload size; must be non-negative
}
| | 610 | + |
// artifactNameRE restricts artifact names to a conservative, path-safe
// character set: ASCII letters, digits, dot, underscore, and hyphen.
var artifactNameRE = regexp.MustCompile(`^[A-Za-z0-9._-]+$`)
| | 612 | + |
// runnerJobArtifactUpload handles POST /api/v1/jobs/{id}/artifacts/upload:
// register an artifact for the job's run and hand back a 15-minute signed
// PUT URL for the object store, plus the next single-use token.
//
// NOTE(review): the artifact row is inserted before the signed URL is
// requested, so a SignedURL failure leaves an orphaned row — consider
// creating the row afterwards or cleaning up on failure.
func (h *Handlers) runnerJobArtifactUpload(w http.ResponseWriter, r *http.Request) {
	if h.d.ObjectStore == nil {
		writeAPIError(w, http.StatusServiceUnavailable, "object storage is not configured")
		return
	}
	auth, ok := h.authenticateRunnerJob(w, r)
	if !ok {
		return
	}
	var body runnerArtifactUploadRequest
	if err := decodeJSONBody(r.Body, &body); err != nil {
		writeAPIError(w, http.StatusBadRequest, "invalid JSON: "+err.Error())
		return
	}
	body.Name = strings.TrimSpace(body.Name)
	if !validArtifactName(body.Name) {
		writeAPIError(w, http.StatusBadRequest, "invalid artifact name")
		return
	}
	if body.SizeBytes < 0 {
		writeAPIError(w, http.StatusBadRequest, "size_bytes must be non-negative")
		return
	}
	// validArtifactName guarantees the name contains no path separators, so
	// it is safe to embed directly in the object key.
	objectKey := fmt.Sprintf("actions/runs/%d/artifacts/%s", auth.Claims.RunID, body.Name)
	artifact, err := actionsdb.New().InsertArtifact(r.Context(), h.d.Pool, actionsdb.InsertArtifactParams{
		RunID:     auth.Claims.RunID,
		Name:      body.Name,
		ObjectKey: objectKey,
		ByteCount: body.SizeBytes,
		// Artifacts expire 90 days after creation.
		ExpiresAt: pgtype.Timestamptz{
			Time:  time.Now().UTC().Add(90 * 24 * time.Hour),
			Valid: true,
		},
	})
	if err != nil {
		writeAPIError(w, http.StatusInternalServerError, "artifact create failed")
		return
	}
	uploadURL, err := h.d.ObjectStore.SignedURL(r.Context(), objectKey, 15*time.Minute, http.MethodPut)
	if err != nil {
		writeAPIError(w, http.StatusInternalServerError, "artifact upload url failed")
		return
	}
	h.writeNextTokenResponse(w, r, http.StatusCreated, auth, map[string]any{
		"artifact_id": artifact.ID,
		"upload_url":  uploadURL,
	})
}
| | 661 | + |
| | 662 | +func validArtifactName(name string) bool { |
| | 663 | + return len(name) >= 1 && |
| | 664 | + len(name) <= 100 && |
| | 665 | + artifactNameRE.MatchString(name) && |
| | 666 | + !strings.HasPrefix(name, "..") && |
| | 667 | + !strings.Contains(name, "/") |
| | 668 | +} |
| | 669 | + |
| | 670 | +func (h *Handlers) runnerJobCancelCheck(w http.ResponseWriter, r *http.Request) { |
| | 671 | + auth, ok := h.authenticateRunnerJob(w, r) |
| | 672 | + if !ok { |
| | 673 | + return |
| | 674 | + } |
| | 675 | + h.writeNextTokenResponse(w, r, http.StatusOK, auth, map[string]any{ |
| | 676 | + "cancelled": auth.Job.CancelRequested, |
| | 677 | + }) |
| | 678 | +} |
| | 679 | + |
| | 680 | +func (h *Handlers) writeNextTokenResponse( |
| | 681 | + w http.ResponseWriter, |
| | 682 | + r *http.Request, |
| | 683 | + status int, |
| | 684 | + auth runnerJobAuth, |
| | 685 | + body map[string]any, |
| | 686 | +) { |
| | 687 | + token, claims, err := h.d.RunnerJWT.Mint(runnerjwt.MintParams{ |
| | 688 | + RunnerID: auth.RunnerID, |
| | 689 | + JobID: auth.Claims.JobID, |
| | 690 | + RunID: auth.Claims.RunID, |
| | 691 | + RepoID: auth.Claims.RepoID, |
| | 692 | + }) |
| | 693 | + if err != nil { |
| | 694 | + h.d.Logger.ErrorContext(r.Context(), "runner next-token mint failed", "job_id", auth.Claims.JobID, "error", err) |
| | 695 | + writeAPIError(w, http.StatusInternalServerError, "runner token mint failed") |
| | 696 | + return |
| | 697 | + } |
| | 698 | + body["next_token"] = token |
| | 699 | + body["next_token_expires_at"] = time.Unix(claims.Exp, 0).UTC().Format(time.RFC3339) |
| | 700 | + writeJSON(w, status, body) |
| | 701 | +} |
| | 702 | + |
| 229 | type runnerClaimResponse struct { | 703 | type runnerClaimResponse struct { |
| 230 | Token string `json:"token"` | 704 | Token string `json:"token"` |
| 231 | ExpiresAt string `json:"expires_at"` | 705 | ExpiresAt string `json:"expires_at"` |
@@ -321,3 +795,45 @@ func rawJSONOrObject(b []byte) json.RawMessage { |
| 321 | } | 795 | } |
| 322 | return json.RawMessage(b) | 796 | return json.RawMessage(b) |
| 323 | } | 797 | } |
| | 798 | + |
| | 799 | +func decodeJSONBody(r io.Reader, v any) error { |
| | 800 | + dec := json.NewDecoder(r) |
| | 801 | + dec.DisallowUnknownFields() |
| | 802 | + return dec.Decode(v) |
| | 803 | +} |
| | 804 | + |
// decodeBase64 decodes s as standard base64, accepting both padded and
// unpadded (raw) input. On success with padding, the raw decoder is never
// consulted; otherwise the raw decoder's result (or error) is returned.
func decodeBase64(s string) ([]byte, error) {
	decoded, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return base64.RawStdEncoding.DecodeString(s)
	}
	return decoded, nil
}
| | 811 | + |
| | 812 | +func validRunnerConclusion(c string) bool { |
| | 813 | + switch actionsdb.CheckConclusion(c) { |
| | 814 | + case actionsdb.CheckConclusionSuccess, |
| | 815 | + actionsdb.CheckConclusionFailure, |
| | 816 | + actionsdb.CheckConclusionNeutral, |
| | 817 | + actionsdb.CheckConclusionCancelled, |
| | 818 | + actionsdb.CheckConclusionSkipped, |
| | 819 | + actionsdb.CheckConclusionTimedOut, |
| | 820 | + actionsdb.CheckConclusionActionRequired: |
| | 821 | + return true |
| | 822 | + default: |
| | 823 | + return false |
| | 824 | + } |
| | 825 | +} |
| | 826 | + |
| | 827 | +func nullableConclusion(c actionsdb.NullCheckConclusion) any { |
| | 828 | + if !c.Valid { |
| | 829 | + return nil |
| | 830 | + } |
| | 831 | + return string(c.CheckConclusion) |
| | 832 | +} |
| | 833 | + |
| | 834 | +func timeFromPg(t pgtype.Timestamptz) time.Time { |
| | 835 | + if !t.Valid { |
| | 836 | + return time.Time{} |
| | 837 | + } |
| | 838 | + return t.Time |
| | 839 | +} |