-- SPDX-License-Identifier: AGPL-3.0-or-later
--
-- S41a workflow step log chunks — append-only log buffer for an
-- in-flight step.
--
-- Hot path during a running step: runner POSTs each chunk via the
-- chunks API (S41c); we INSERT a row keyed by (step_id, seq). The UI's
-- SSE handler (S41f) LISTENs on `step_log_<step_id>` and SELECTs new
-- chunks since the last seen seq.
--
-- Cold path after step completion: a single workflow:finalize_step
-- worker job (S41d) concatenates all chunks for the step, uploads the
-- result to Spaces (workflow_steps.log_object_key), and DELETEs the
-- chunk rows. Postgres stays small; long-term storage stays cheap.
--
-- Per-row cap: 512 KB chunk size. Per-step soft cap (10 MB) and per-job
-- hard cap (100 MB) are enforced runner-side at insert time (S41c) —
-- we don't put a CHECK constraint on the cumulative volume because
-- each row is independent. Total budget is observability + runner
-- discipline.
--
-- created_at indexed for retention sweep (S41g) on chunks orphaned by
-- runner crashes.

-- +goose Up

CREATE TABLE workflow_step_log_chunks (
    id         bigserial PRIMARY KEY,
    -- Owning step; ON DELETE CASCADE means deleting a step reaps any
    -- chunks the finalize job (S41d) has not yet cleaned up.
    step_id    bigint NOT NULL REFERENCES workflow_steps(id) ON DELETE CASCADE,
    -- Runner-assigned position of this chunk within the step's log
    -- stream; the SSE reader pages forward on it.
    seq        integer NOT NULL,
    -- Raw log bytes for this chunk (size-capped below).
    chunk      bytea NOT NULL,
    -- Insert time; drives the orphaned-chunk retention sweep (S41g).
    created_at timestamptz NOT NULL DEFAULT now(),

    -- Named (matching the file's other constraints) so duplicate-seq
    -- errors are greppable. The backing unique btree index also serves
    -- the (step_id, seq) range scans done by the SSE reader.
    CONSTRAINT workflow_step_log_chunks_step_id_seq_key UNIQUE (step_id, seq),

    CONSTRAINT workflow_step_log_chunks_seq_nonneg CHECK (seq >= 0),
    -- Per-row cap: 1 byte .. 512 KB (524288 = 512 * 1024); empty
    -- chunks are rejected so every row carries payload.
    CONSTRAINT workflow_step_log_chunks_size_cap CHECK (
        octet_length(chunk) BETWEEN 1 AND 524288
    )
);

-- NOTE(review): no separate (step_id, seq) index here — the
-- UNIQUE (step_id, seq) constraint on the table already creates a
-- backing btree index that serves those lookups; a duplicate index
-- would only add write amplification on the hot insert path.

-- Supports the retention sweep (S41g) that reaps chunks orphaned by
-- runner crashes.
CREATE INDEX workflow_step_log_chunks_created_idx
    ON workflow_step_log_chunks (created_at);


-- +goose Down
-- Whole-table teardown is intentional: this migration owns the table.
DROP TABLE IF EXISTS workflow_step_log_chunks;