tenseleyflow/shithub / 07bdfb8

Browse files

actions/runner: scrub claimed secrets on log ingest

Authored by mfwolffe <wolffemf@dukes.jmu.edu>
SHA
07bdfb8eecb532151b400e116919da3c1e32e489
Parents
778c821
Tree
395a030

19 changed files

Status | File | + | -
M cmd/shithubd-runner/run.go 22 17
M internal/actions/queries/workflow_step_log_chunks.sql 17 0
M internal/actions/sqlc/querier.go 3 0
M internal/actions/sqlc/workflow_step_log_chunks.sql.go 66 0
M internal/infra/metrics/metrics.go 8 0
M internal/runner/api/client.go 2 0
M internal/runner/config/config.go 96 19
M internal/runner/config/config_test.go 38 5
M internal/runner/engine/docker.go 8 0
M internal/runner/engine/docker_test.go 35 0
M internal/runner/engine/types.go 1 0
M internal/runner/runner.go 13 0
M internal/runner/scrub/scrub.go 38 3
M internal/runner/scrub/scrub_test.go 6 0
M internal/web/auth_wiring.go 2 0
M internal/web/handlers/api/api.go 2 0
M internal/web/handlers/api/runners.go 225 25
M internal/web/handlers/api/runners_test.go 195 0
M internal/web/server.go 10 2
cmd/shithubd-runner/run.gomodified
@@ -61,6 +61,7 @@ var runCmd = &cobra.Command{
61
 			SeccompProfile: cfg.Engine.SeccompProfile,
61
 			SeccompProfile: cfg.Engine.SeccompProfile,
62
 			User:           cfg.Engine.User,
62
 			User:           cfg.Engine.User,
63
 			PidsLimit:      cfg.Engine.PidsLimit,
63
 			PidsLimit:      cfg.Engine.PidsLimit,
64
+			DNSServers:     cfg.Engine.DNSServers,
64
 			Stdout:         os.Stdout,
65
 			Stdout:         os.Stdout,
65
 			Stderr:         os.Stderr,
66
 			Stderr:         os.Stderr,
66
 			Logger:         logger,
67
 			Logger:         logger,
@@ -97,29 +98,33 @@ func init() {
97
 	runCmd.Flags().String("seccomp-profile", "", "Container seccomp profile path")
98
 	runCmd.Flags().String("seccomp-profile", "", "Container seccomp profile path")
98
 	runCmd.Flags().String("container-user", "", "Default container user")
99
 	runCmd.Flags().String("container-user", "", "Default container user")
99
 	runCmd.Flags().Int("pids-limit", 0, "Container PID limit")
100
 	runCmd.Flags().Int("pids-limit", 0, "Container PID limit")
101
+	runCmd.Flags().String("network-allowlist", "", "Comma-separated host patterns allowed by the runner DNS policy")
102
+	runCmd.Flags().String("dns-servers", "", "Comma-separated DNS servers passed to step containers")
100
 	runCmd.Flags().String("log-level", "", "Log level: debug, info, warn, error")
103
 	runCmd.Flags().String("log-level", "", "Log level: debug, info, warn, error")
101
 	runCmd.Flags().String("log-format", "", "Log format: text or json")
104
 	runCmd.Flags().String("log-format", "", "Log format: text or json")
102
 }
105
 }
103
 
106
 
104
 func flagOverrides(cmd *cobra.Command) map[string]string {
107
 func flagOverrides(cmd *cobra.Command) map[string]string {
105
 	keys := map[string]string{
108
 	keys := map[string]string{
106
-		"server-url":      "server.base_url",
109
+		"server-url":        "server.base_url",
107
-		"token":           "runner.token",
110
+		"token":             "runner.token",
108
-		"labels":          "runner.labels",
111
+		"labels":            "runner.labels",
109
-		"capacity":        "runner.capacity",
112
+		"capacity":          "runner.capacity",
110
-		"poll-interval":   "runner.poll_interval",
113
+		"poll-interval":     "runner.poll_interval",
111
-		"workspace-root":  "runner.workspace_root",
114
+		"workspace-root":    "runner.workspace_root",
112
-		"workspace-ttl":   "runner.workspace_ttl",
115
+		"workspace-ttl":     "runner.workspace_ttl",
113
-		"engine":          "engine.kind",
116
+		"engine":            "engine.kind",
114
-		"image":           "engine.default_image",
117
+		"image":             "engine.default_image",
115
-		"network":         "engine.network",
118
+		"network":           "engine.network",
116
-		"memory":          "engine.memory",
119
+		"memory":            "engine.memory",
117
-		"cpus":            "engine.cpus",
120
+		"cpus":              "engine.cpus",
118
-		"seccomp-profile": "engine.seccomp_profile",
121
+		"seccomp-profile":   "engine.seccomp_profile",
119
-		"container-user":  "engine.user",
122
+		"container-user":    "engine.user",
120
-		"pids-limit":      "engine.pids_limit",
123
+		"pids-limit":        "engine.pids_limit",
121
-		"log-level":       "log.level",
124
+		"network-allowlist": "runner.network_allowlist",
122
-		"log-format":      "log.format",
125
+		"dns-servers":       "engine.dns_servers",
126
+		"log-level":         "log.level",
127
+		"log-format":        "log.format",
123
 	}
128
 	}
124
 	out := make(map[string]string)
129
 	out := make(map[string]string)
125
 	cmd.Flags().Visit(func(f *pflag.Flag) {
130
 	cmd.Flags().Visit(func(f *pflag.Flag) {
internal/actions/queries/workflow_step_log_chunks.sqlmodified
@@ -13,5 +13,22 @@ WHERE step_id = $1 AND seq > $2
13
 ORDER BY seq ASC
13
 ORDER BY seq ASC
14
 LIMIT $3;
14
 LIMIT $3;
15
 
15
 
16
+-- name: GetStepLogChunkBefore :one
17
+SELECT id, step_id, seq, chunk, created_at
18
+FROM workflow_step_log_chunks
19
+WHERE step_id = $1 AND seq < $2
20
+ORDER BY seq DESC
21
+LIMIT 1;
22
+
23
+-- name: GetStepLogChunkByStepSeq :one
24
+SELECT id, step_id, seq, chunk, created_at
25
+FROM workflow_step_log_chunks
26
+WHERE step_id = $1 AND seq = $2;
27
+
28
+-- name: UpdateStepLogChunk :exec
29
+UPDATE workflow_step_log_chunks
30
+SET chunk = $2
31
+WHERE id = $1;
32
+
16
 -- name: DeleteStepLogChunks :exec
33
 -- name: DeleteStepLogChunks :exec
17
 DELETE FROM workflow_step_log_chunks WHERE step_id = $1;
34
 DELETE FROM workflow_step_log_chunks WHERE step_id = $1;
internal/actions/sqlc/querier.gomodified
@@ -42,6 +42,8 @@ type Querier interface {
42
 	GetRunnerByID(ctx context.Context, db DBTX, id int64) (WorkflowRunner, error)
42
 	GetRunnerByID(ctx context.Context, db DBTX, id int64) (WorkflowRunner, error)
43
 	GetRunnerByName(ctx context.Context, db DBTX, name string) (WorkflowRunner, error)
43
 	GetRunnerByName(ctx context.Context, db DBTX, name string) (WorkflowRunner, error)
44
 	GetRunnerByTokenHash(ctx context.Context, db DBTX, tokenHash []byte) (GetRunnerByTokenHashRow, error)
44
 	GetRunnerByTokenHash(ctx context.Context, db DBTX, tokenHash []byte) (GetRunnerByTokenHashRow, error)
45
+	GetStepLogChunkBefore(ctx context.Context, db DBTX, arg GetStepLogChunkBeforeParams) (WorkflowStepLogChunk, error)
46
+	GetStepLogChunkByStepSeq(ctx context.Context, db DBTX, arg GetStepLogChunkByStepSeqParams) (WorkflowStepLogChunk, error)
45
 	GetWorkflowJobByID(ctx context.Context, db DBTX, id int64) (WorkflowJob, error)
47
 	GetWorkflowJobByID(ctx context.Context, db DBTX, id int64) (WorkflowJob, error)
46
 	GetWorkflowRunByID(ctx context.Context, db DBTX, id int64) (WorkflowRun, error)
48
 	GetWorkflowRunByID(ctx context.Context, db DBTX, id int64) (WorkflowRun, error)
47
 	GetWorkflowStepByID(ctx context.Context, db DBTX, id int64) (WorkflowStep, error)
49
 	GetWorkflowStepByID(ctx context.Context, db DBTX, id int64) (WorkflowStep, error)
@@ -85,6 +87,7 @@ type Querier interface {
85
 	NextRunIndexForRepo(ctx context.Context, db DBTX, repoID int64) (int64, error)
87
 	NextRunIndexForRepo(ctx context.Context, db DBTX, repoID int64) (int64, error)
86
 	RevokeAllTokensForRunner(ctx context.Context, db DBTX, runnerID int64) error
88
 	RevokeAllTokensForRunner(ctx context.Context, db DBTX, runnerID int64) error
87
 	TouchRunnerHeartbeat(ctx context.Context, db DBTX, arg TouchRunnerHeartbeatParams) error
89
 	TouchRunnerHeartbeat(ctx context.Context, db DBTX, arg TouchRunnerHeartbeatParams) error
90
+	UpdateStepLogChunk(ctx context.Context, db DBTX, arg UpdateStepLogChunkParams) error
88
 	UpdateWorkflowJobStatus(ctx context.Context, db DBTX, arg UpdateWorkflowJobStatusParams) (WorkflowJob, error)
91
 	UpdateWorkflowJobStatus(ctx context.Context, db DBTX, arg UpdateWorkflowJobStatusParams) (WorkflowJob, error)
89
 	UpdateWorkflowStepLogObject(ctx context.Context, db DBTX, arg UpdateWorkflowStepLogObjectParams) (WorkflowStep, error)
92
 	UpdateWorkflowStepLogObject(ctx context.Context, db DBTX, arg UpdateWorkflowStepLogObjectParams) (WorkflowStep, error)
90
 	UpdateWorkflowStepStatus(ctx context.Context, db DBTX, arg UpdateWorkflowStepStatusParams) (WorkflowStep, error)
93
 	UpdateWorkflowStepStatus(ctx context.Context, db DBTX, arg UpdateWorkflowStepStatusParams) (WorkflowStep, error)
internal/actions/sqlc/workflow_step_log_chunks.sql.gomodified
@@ -54,6 +54,56 @@ func (q *Queries) DeleteStepLogChunks(ctx context.Context, db DBTX, stepID int64
54
 	return err
54
 	return err
55
 }
55
 }
56
 
56
 
57
+const getStepLogChunkBefore = `-- name: GetStepLogChunkBefore :one
58
+SELECT id, step_id, seq, chunk, created_at
59
+FROM workflow_step_log_chunks
60
+WHERE step_id = $1 AND seq < $2
61
+ORDER BY seq DESC
62
+LIMIT 1
63
+`
64
+
65
+type GetStepLogChunkBeforeParams struct {
66
+	StepID int64
67
+	Seq    int32
68
+}
69
+
70
+func (q *Queries) GetStepLogChunkBefore(ctx context.Context, db DBTX, arg GetStepLogChunkBeforeParams) (WorkflowStepLogChunk, error) {
71
+	row := db.QueryRow(ctx, getStepLogChunkBefore, arg.StepID, arg.Seq)
72
+	var i WorkflowStepLogChunk
73
+	err := row.Scan(
74
+		&i.ID,
75
+		&i.StepID,
76
+		&i.Seq,
77
+		&i.Chunk,
78
+		&i.CreatedAt,
79
+	)
80
+	return i, err
81
+}
82
+
83
+const getStepLogChunkByStepSeq = `-- name: GetStepLogChunkByStepSeq :one
84
+SELECT id, step_id, seq, chunk, created_at
85
+FROM workflow_step_log_chunks
86
+WHERE step_id = $1 AND seq = $2
87
+`
88
+
89
+type GetStepLogChunkByStepSeqParams struct {
90
+	StepID int64
91
+	Seq    int32
92
+}
93
+
94
+func (q *Queries) GetStepLogChunkByStepSeq(ctx context.Context, db DBTX, arg GetStepLogChunkByStepSeqParams) (WorkflowStepLogChunk, error) {
95
+	row := db.QueryRow(ctx, getStepLogChunkByStepSeq, arg.StepID, arg.Seq)
96
+	var i WorkflowStepLogChunk
97
+	err := row.Scan(
98
+		&i.ID,
99
+		&i.StepID,
100
+		&i.Seq,
101
+		&i.Chunk,
102
+		&i.CreatedAt,
103
+	)
104
+	return i, err
105
+}
106
+
57
 const listStepLogChunks = `-- name: ListStepLogChunks :many
107
 const listStepLogChunks = `-- name: ListStepLogChunks :many
58
 SELECT id, step_id, seq, chunk, created_at
108
 SELECT id, step_id, seq, chunk, created_at
59
 FROM workflow_step_log_chunks
109
 FROM workflow_step_log_chunks
@@ -93,3 +143,19 @@ func (q *Queries) ListStepLogChunks(ctx context.Context, db DBTX, arg ListStepLo
93
 	}
143
 	}
94
 	return items, nil
144
 	return items, nil
95
 }
145
 }
146
+
147
+const updateStepLogChunk = `-- name: UpdateStepLogChunk :exec
148
+UPDATE workflow_step_log_chunks
149
+SET chunk = $2
150
+WHERE id = $1
151
+`
152
+
153
+type UpdateStepLogChunkParams struct {
154
+	ID    int64
155
+	Chunk []byte
156
+}
157
+
158
+func (q *Queries) UpdateStepLogChunk(ctx context.Context, db DBTX, arg UpdateStepLogChunkParams) error {
159
+	_, err := db.Exec(ctx, updateStepLogChunk, arg.ID, arg.Chunk)
160
+	return err
161
+}
internal/infra/metrics/metrics.gomodified
@@ -149,6 +149,13 @@ var (
149
 		},
149
 		},
150
 		[]string{"result"},
150
 		[]string{"result"},
151
 	)
151
 	)
152
+	ActionsLogScrubReplacementsTotal = prometheus.NewCounterVec(
153
+		prometheus.CounterOpts{
154
+			Name: "shithub_actions_log_scrub_replacements_total",
155
+			Help: "Total exact secret-value replacements performed on Actions log chunks.",
156
+		},
157
+		[]string{"location"},
158
+	)
152
 )
159
 )
153
 
160
 
154
 func init() {
161
 func init() {
@@ -169,6 +176,7 @@ func init() {
169
 		ActionsRunnerRegistrationsTotal,
176
 		ActionsRunnerRegistrationsTotal,
170
 		ActionsRunnerHeartbeatsTotal,
177
 		ActionsRunnerHeartbeatsTotal,
171
 		ActionsRunnerJWTTotal,
178
 		ActionsRunnerJWTTotal,
179
+		ActionsLogScrubReplacementsTotal,
172
 	)
180
 	)
173
 }
181
 }
174
 
182
 
internal/runner/api/client.gomodified
@@ -73,6 +73,8 @@ type Job struct {
73
 	If             string            `json:"if"`
73
 	If             string            `json:"if"`
74
 	TimeoutMinutes int32             `json:"timeout_minutes"`
74
 	TimeoutMinutes int32             `json:"timeout_minutes"`
75
 	Permissions    json.RawMessage   `json:"permissions"`
75
 	Permissions    json.RawMessage   `json:"permissions"`
76
+	Secrets        map[string]string `json:"secrets"`
77
+	MaskValues     []string          `json:"mask_values"`
76
 	Env            map[string]string `json:"env"`
78
 	Env            map[string]string `json:"env"`
77
 	Steps          []Step            `json:"steps"`
79
 	Steps          []Step            `json:"steps"`
78
 }
80
 }
internal/runner/config/config.gomodified
@@ -12,6 +12,7 @@ package config
12
 import (
12
 import (
13
 	"errors"
13
 	"errors"
14
 	"fmt"
14
 	"fmt"
15
+	"net/netip"
15
 	"net/url"
16
 	"net/url"
16
 	"os"
17
 	"os"
17
 	"reflect"
18
 	"reflect"
@@ -33,6 +34,17 @@ const (
33
 	defaultContainerPIDMax = 512
34
 	defaultContainerPIDMax = 512
34
 )
35
 )
35
 
36
 
37
+var defaultNetworkAllowlist = []string{
38
+	"api.github.com",
39
+	"auth.docker.io",
40
+	"codeload.github.com",
41
+	"github.com",
42
+	"objects.githubusercontent.com",
43
+	"production.cloudflare.docker.com",
44
+	"registry-1.docker.io",
45
+	"*.githubusercontent.com",
46
+}
47
+
36
 // LoadOptions controls config resolution. Zero value uses the default path,
48
 // LoadOptions controls config resolution. Zero value uses the default path,
37
 // process environment, and no CLI overrides.
49
 // process environment, and no CLI overrides.
38
 type LoadOptions struct {
50
 type LoadOptions struct {
@@ -54,23 +66,25 @@ type ServerConfig struct {
54
 }
66
 }
55
 
67
 
56
 type RunnerConfig struct {
68
 type RunnerConfig struct {
57
-	Token         string        `toml:"token"`
69
+	Token            string        `toml:"token"`
58
-	Labels        []string      `toml:"labels"`
70
+	Labels           []string      `toml:"labels"`
59
-	Capacity      int           `toml:"capacity"`
71
+	Capacity         int           `toml:"capacity"`
60
-	PollInterval  time.Duration `toml:"poll_interval"`
72
+	PollInterval     time.Duration `toml:"poll_interval"`
61
-	WorkspaceRoot string        `toml:"workspace_root"`
73
+	WorkspaceRoot    string        `toml:"workspace_root"`
62
-	WorkspaceTTL  time.Duration `toml:"workspace_ttl"`
74
+	WorkspaceTTL     time.Duration `toml:"workspace_ttl"`
75
+	NetworkAllowlist []string      `toml:"network_allowlist"`
63
 }
76
 }
64
 
77
 
65
 type EngineConfig struct {
78
 type EngineConfig struct {
66
-	Kind           string `toml:"kind"`
79
+	Kind           string   `toml:"kind"`
67
-	DefaultImage   string `toml:"default_image"`
80
+	DefaultImage   string   `toml:"default_image"`
68
-	Network        string `toml:"network"`
81
+	Network        string   `toml:"network"`
69
-	Memory         string `toml:"memory"`
82
+	Memory         string   `toml:"memory"`
70
-	CPUs           string `toml:"cpus"`
83
+	CPUs           string   `toml:"cpus"`
71
-	SeccompProfile string `toml:"seccomp_profile"`
84
+	SeccompProfile string   `toml:"seccomp_profile"`
72
-	User           string `toml:"user"`
85
+	User           string   `toml:"user"`
73
-	PidsLimit      int    `toml:"pids_limit"`
86
+	PidsLimit      int      `toml:"pids_limit"`
87
+	DNSServers     []string `toml:"dns_servers"`
74
 }
88
 }
75
 
89
 
76
 type LogConfig struct {
90
 type LogConfig struct {
@@ -84,11 +98,12 @@ func Defaults() Config {
84
 			BaseURL: "http://127.0.0.1:8080",
98
 			BaseURL: "http://127.0.0.1:8080",
85
 		},
99
 		},
86
 		Runner: RunnerConfig{
100
 		Runner: RunnerConfig{
87
-			Labels:        []string{"self-hosted", "linux", "ubuntu-latest"},
101
+			Labels:           []string{"self-hosted", "linux", "ubuntu-latest"},
88
-			Capacity:      1,
102
+			Capacity:         1,
89
-			PollInterval:  5 * time.Second,
103
+			PollInterval:     5 * time.Second,
90
-			WorkspaceRoot: "/var/lib/shithubd-runner/workspaces",
104
+			WorkspaceRoot:    "/var/lib/shithubd-runner/workspaces",
91
-			WorkspaceTTL:  24 * time.Hour,
105
+			WorkspaceTTL:     24 * time.Hour,
106
+			NetworkAllowlist: append([]string{}, defaultNetworkAllowlist...),
92
 		},
107
 		},
93
 		Engine: EngineConfig{
108
 		Engine: EngineConfig{
94
 			Kind:           "docker",
109
 			Kind:           "docker",
@@ -227,6 +242,11 @@ func Validate(c *Config) error {
227
 	if c.Runner.WorkspaceTTL <= 0 {
242
 	if c.Runner.WorkspaceTTL <= 0 {
228
 		return errors.New("runner config: runner.workspace_ttl must be positive")
243
 		return errors.New("runner config: runner.workspace_ttl must be positive")
229
 	}
244
 	}
245
+	allowlist, err := normalizeHostPatterns(c.Runner.NetworkAllowlist)
246
+	if err != nil {
247
+		return fmt.Errorf("runner config: runner.network_allowlist: %w", err)
248
+	}
249
+	c.Runner.NetworkAllowlist = allowlist
230
 
250
 
231
 	switch strings.ToLower(strings.TrimSpace(c.Engine.Kind)) {
251
 	switch strings.ToLower(strings.TrimSpace(c.Engine.Kind)) {
232
 	case "docker", "podman":
252
 	case "docker", "podman":
@@ -257,6 +277,11 @@ func Validate(c *Config) error {
257
 	if c.Engine.PidsLimit <= 0 {
277
 	if c.Engine.PidsLimit <= 0 {
258
 		return fmt.Errorf("runner config: engine.pids_limit must be positive, got %d", c.Engine.PidsLimit)
278
 		return fmt.Errorf("runner config: engine.pids_limit must be positive, got %d", c.Engine.PidsLimit)
259
 	}
279
 	}
280
+	dnsServers, err := normalizeDNSServers(c.Engine.DNSServers)
281
+	if err != nil {
282
+		return fmt.Errorf("runner config: engine.dns_servers: %w", err)
283
+	}
284
+	c.Engine.DNSServers = dnsServers
260
 
285
 
261
 	switch strings.ToLower(c.Log.Level) {
286
 	switch strings.ToLower(c.Log.Level) {
262
 	case "debug", "info", "warn", "error":
287
 	case "debug", "info", "warn", "error":
@@ -273,6 +298,58 @@ func Validate(c *Config) error {
273
 	return nil
298
 	return nil
274
 }
299
 }
275
 
300
 
301
// normalizeHostPatterns lowercases, trims, validates, and deduplicates the
// configured network-allowlist host patterns, preserving first-seen order.
//
// A pattern is either a literal hostname ("github.com") or a single
// leading-label wildcard ("*.example.com"). Schemes, ports, paths, empty
// labels, and wildcards anywhere other than a lone leading "*." label are
// rejected. An empty result is an error, since an empty allowlist would
// silently disable the runner's DNS policy.
func normalizeHostPatterns(patterns []string) ([]string, error) {
	seen := map[string]struct{}{}
	out := make([]string, 0, len(patterns))
	for _, p := range patterns {
		p = strings.ToLower(strings.TrimSpace(p))
		if p == "" {
			continue
		}
		// Reject URL-ish input ("https://…", "host:443") and any character
		// outside the hostname alphabet.
		if strings.ContainsAny(p, "/:") || strings.Trim(p, "*.abcdefghijklmnopqrstuvwxyz0123456789-") != "" {
			return nil, fmt.Errorf("invalid host pattern %q", p)
		}
		// Reject empty labels and leading/trailing dots.
		if strings.Contains(p, "..") || strings.HasPrefix(p, ".") || strings.HasSuffix(p, ".") {
			return nil, fmt.Errorf("invalid host pattern %q", p)
		}
		// A wildcard is valid only as a single leading "*." label. The old
		// check let interior wildcards ("*.*.com", "*.a*b.com") through as
		// long as the pattern started with "*.", which the matcher's
		// single-label semantics cannot honor.
		if n := strings.Count(p, "*"); n > 0 && (n != 1 || !strings.HasPrefix(p, "*.")) {
			return nil, fmt.Errorf("invalid wildcard host pattern %q", p)
		}
		if _, ok := seen[p]; ok {
			continue
		}
		seen[p] = struct{}{}
		out = append(out, p)
	}
	if len(out) == 0 {
		return nil, errors.New("must contain at least one host pattern")
	}
	return out, nil
}
329
+
330
// normalizeDNSServers trims, validates, and deduplicates the configured DNS
// server addresses, preserving first-seen order. Every entry must parse as a
// literal IP address (IPv4 or IPv6); hostnames are rejected. Blank entries
// are dropped, and an input with no usable entries yields an empty slice
// without error.
func normalizeDNSServers(servers []string) ([]string, error) {
	out := make([]string, 0, len(servers))
	seen := make(map[string]struct{}, len(servers))
	for _, raw := range servers {
		addr := strings.TrimSpace(raw)
		if addr == "" {
			continue
		}
		// Embedded whitespace can never be part of a valid address and
		// would corrupt the docker CLI invocation downstream.
		if strings.ContainsAny(addr, " \t\r\n") {
			return nil, fmt.Errorf("invalid DNS server %q", addr)
		}
		if _, err := netip.ParseAddr(addr); err != nil {
			return nil, fmt.Errorf("invalid DNS server %q", addr)
		}
		if _, dup := seen[addr]; dup {
			continue
		}
		seen[addr] = struct{}{}
		out = append(out, addr)
	}
	return out, nil
}
352
+
276
 func normalizeLabels(labels []string) ([]string, error) {
353
 func normalizeLabels(labels []string) ([]string, error) {
277
 	if len(labels) == 1 && strings.Contains(labels[0], ",") {
354
 	if len(labels) == 1 && strings.Contains(labels[0], ",") {
278
 		return runnerlabels.ParseCSV(labels[0])
355
 		return runnerlabels.ParseCSV(labels[0])
internal/runner/config/config_test.gomodified
@@ -34,6 +34,9 @@ func TestLoad_DefaultsWithToken(t *testing.T) {
34
 	if cfg.Engine.PidsLimit != 512 {
34
 	if cfg.Engine.PidsLimit != 512 {
35
 		t.Fatalf("Engine.PidsLimit: %d", cfg.Engine.PidsLimit)
35
 		t.Fatalf("Engine.PidsLimit: %d", cfg.Engine.PidsLimit)
36
 	}
36
 	}
37
+	if want := []string{"api.github.com", "auth.docker.io", "codeload.github.com", "github.com", "objects.githubusercontent.com", "production.cloudflare.docker.com", "registry-1.docker.io", "*.githubusercontent.com"}; !reflect.DeepEqual(cfg.Runner.NetworkAllowlist, want) {
38
+		t.Fatalf("NetworkAllowlist: got %#v want %#v", cfg.Runner.NetworkAllowlist, want)
39
+	}
37
 	if cfg.Runner.PollInterval != 5*time.Second {
40
 	if cfg.Runner.PollInterval != 5*time.Second {
38
 		t.Fatalf("PollInterval: %v", cfg.Runner.PollInterval)
41
 		t.Fatalf("PollInterval: %v", cfg.Runner.PollInterval)
39
 	}
42
 	}
@@ -54,6 +57,7 @@ capacity = 2
54
 poll_interval = "10s"
57
 poll_interval = "10s"
55
 workspace_root = "/tmp/file"
58
 workspace_root = "/tmp/file"
56
 workspace_ttl = "12h"
59
 workspace_ttl = "12h"
60
+network_allowlist = ["github.com", "*.githubusercontent.com"]
57
 
61
 
58
 [engine]
62
 [engine]
59
 kind = "docker"
63
 kind = "docker"
@@ -64,6 +68,7 @@ cpus = "1"
64
 seccomp_profile = "/file/seccomp.json"
68
 seccomp_profile = "/file/seccomp.json"
65
 user = "1000:1000"
69
 user = "1000:1000"
66
 pids_limit = 64
70
 pids_limit = 64
71
+dns_servers = ["172.30.0.10"]
67
 `
72
 `
68
 	if err := os.WriteFile(path, []byte(body), 0o600); err != nil {
73
 	if err := os.WriteFile(path, []byte(body), 0o600); err != nil {
69
 		t.Fatalf("WriteFile: %v", err)
74
 		t.Fatalf("WriteFile: %v", err)
@@ -74,15 +79,17 @@ pids_limit = 64
74
 		Environ: []string{
79
 		Environ: []string{
75
 			"SHITHUB_RUNNER_TOKEN=alias-token",
80
 			"SHITHUB_RUNNER_TOKEN=alias-token",
76
 			"SHITHUB_RUNNER_ENGINE__PIDS_LIMIT=256",
81
 			"SHITHUB_RUNNER_ENGINE__PIDS_LIMIT=256",
82
+			"SHITHUB_RUNNER_ENGINE__DNS_SERVERS=172.30.0.11,172.30.0.12",
77
 			"SHITHUB_RUNNER_RUNNER__CAPACITY=3",
83
 			"SHITHUB_RUNNER_RUNNER__CAPACITY=3",
78
 			"SHITHUB_RUNNER_RUNNER__LABELS=self-hosted,linux,x64",
84
 			"SHITHUB_RUNNER_RUNNER__LABELS=self-hosted,linux,x64",
79
 		},
85
 		},
80
 		Overrides: map[string]string{
86
 		Overrides: map[string]string{
81
-			"server.base_url":        "https://flag.example/path/",
87
+			"server.base_url":          "https://flag.example/path/",
82
-			"runner.capacity":        "4",
88
+			"runner.capacity":          "4",
83
-			"runner.poll_interval":   "2s",
89
+			"runner.poll_interval":     "2s",
84
-			"engine.seccomp_profile": "/flag/seccomp.json",
90
+			"runner.network_allowlist": "api.github.com,github.com",
85
-			"engine.user":            "123:456",
91
+			"engine.seccomp_profile":   "/flag/seccomp.json",
92
+			"engine.user":              "123:456",
86
 		},
93
 		},
87
 	})
94
 	})
88
 	if err != nil {
95
 	if err != nil {
@@ -112,6 +119,12 @@ pids_limit = 64
112
 	if cfg.Engine.PidsLimit != 256 {
119
 	if cfg.Engine.PidsLimit != 256 {
113
 		t.Fatalf("PidsLimit: %d", cfg.Engine.PidsLimit)
120
 		t.Fatalf("PidsLimit: %d", cfg.Engine.PidsLimit)
114
 	}
121
 	}
122
+	if want := []string{"api.github.com", "github.com"}; !reflect.DeepEqual(cfg.Runner.NetworkAllowlist, want) {
123
+		t.Fatalf("NetworkAllowlist: got %#v want %#v", cfg.Runner.NetworkAllowlist, want)
124
+	}
125
+	if want := []string{"172.30.0.11", "172.30.0.12"}; !reflect.DeepEqual(cfg.Engine.DNSServers, want) {
126
+		t.Fatalf("DNSServers: got %#v want %#v", cfg.Engine.DNSServers, want)
127
+	}
115
 }
128
 }
116
 
129
 
117
 func TestLoad_RequiresToken(t *testing.T) {
130
 func TestLoad_RequiresToken(t *testing.T) {
@@ -151,3 +164,23 @@ func TestValidate_RejectsBadPidsLimit(t *testing.T) {
151
 		t.Fatal("Validate returned nil error")
164
 		t.Fatal("Validate returned nil error")
152
 	}
165
 	}
153
 }
166
 }
167
+
168
+func TestValidate_RejectsBadNetworkAllowlist(t *testing.T) {
169
+	t.Parallel()
170
+	cfg := Defaults()
171
+	cfg.Runner.Token = "tok"
172
+	cfg.Runner.NetworkAllowlist = []string{"https://github.com"}
173
+	if err := Validate(&cfg); err == nil {
174
+		t.Fatal("Validate returned nil error")
175
+	}
176
+}
177
+
178
+func TestValidate_RejectsBadDNSServer(t *testing.T) {
179
+	t.Parallel()
180
+	cfg := Defaults()
181
+	cfg.Runner.Token = "tok"
182
+	cfg.Engine.DNSServers = []string{"dns.internal"}
183
+	if err := Validate(&cfg); err == nil {
184
+		t.Fatal("Validate returned nil error")
185
+	}
186
+}
internal/runner/engine/docker.gomodified
@@ -64,6 +64,7 @@ type DockerConfig struct {
64
 	SeccompProfile   string
64
 	SeccompProfile   string
65
 	User             string
65
 	User             string
66
 	PidsLimit        int
66
 	PidsLimit        int
67
+	DNSServers       []string
67
 	LogChunkBytes    int
68
 	LogChunkBytes    int
68
 	LogFlushInterval time.Duration
69
 	LogFlushInterval time.Duration
69
 	StepLogLimit     int64
70
 	StepLogLimit     int64
@@ -261,6 +262,12 @@ func (d *Docker) dockerInvocation(job Job, step Step) (dockerInvocation, error)
261
 		"--workdir=" + workdir,
262
 		"--workdir=" + workdir,
262
 		"--mount", "type=bind,src=" + job.WorkspaceDir + ",dst=/workspace,rw",
263
 		"--mount", "type=bind,src=" + job.WorkspaceDir + ",dst=/workspace,rw",
263
 	}
264
 	}
265
+	for _, dns := range d.cfg.DNSServers {
266
+		dns = strings.TrimSpace(dns)
267
+		if dns != "" {
268
+			args = append(args, "--dns", dns)
269
+		}
270
+	}
264
 	env, err := validateEnv(rendered.Env)
271
 	env, err := validateEnv(rendered.Env)
265
 	if err != nil {
272
 	if err != nil {
266
 		return dockerInvocation{}, err
273
 		return dockerInvocation{}, err
@@ -306,6 +313,7 @@ func expressionContext(job Job) expr.Context {
306
 		_ = json.Unmarshal([]byte(job.Event), &event)
313
 		_ = json.Unmarshal([]byte(job.Event), &event)
307
 	}
314
 	}
308
 	return expr.Context{
315
 	return expr.Context{
316
+		Secrets: job.Secrets,
309
 		Shithub: expr.ShithubContext{
317
 		Shithub: expr.ShithubContext{
310
 			Event: event,
318
 			Event: event,
311
 			RunID: fmt.Sprintf("%d", job.RunID),
319
 			RunID: fmt.Sprintf("%d", job.RunID),
internal/runner/engine/docker_test.gomodified
@@ -162,6 +162,29 @@ func TestDockerExecute_RootRequiresExplicitPermission(t *testing.T) {
162
 	}
162
 	}
163
 }
163
 }
164
 
164
 
165
+func TestDockerExecute_AddsConfiguredDNSServers(t *testing.T) {
166
+	t.Parallel()
167
+	rec := &recordingRunner{}
168
+	d := NewDocker(DockerConfig{
169
+		DefaultImage: "runner-image",
170
+		Network:      "actions-net",
171
+		Memory:       "2g",
172
+		CPUs:         "2",
173
+		DNSServers:   []string{"172.30.0.10", "172.30.0.11"},
174
+		Runner:       rec,
175
+	})
176
+	if _, err := d.Execute(t.Context(), Job{
177
+		ID:           1,
178
+		WorkspaceDir: t.TempDir(),
179
+		Steps:        []Step{{Run: "curl https://github.com"}},
180
+	}); err != nil {
181
+		t.Fatalf("Execute: %v", err)
182
+	}
183
+	if argAfterN(rec.args, "--dns", 0) != "172.30.0.10" || argAfterN(rec.args, "--dns", 1) != "172.30.0.11" {
184
+		t.Fatalf("dns args missing: %#v", rec.args)
185
+	}
186
+}
187
+
165
 func TestDockerExecute_StreamsStepLogs(t *testing.T) {
188
 func TestDockerExecute_StreamsStepLogs(t *testing.T) {
166
 	t.Parallel()
189
 	t.Parallel()
167
 	d := NewDocker(DockerConfig{
190
 	d := NewDocker(DockerConfig{
@@ -350,3 +373,15 @@ func argAfter(args []string, flag string) string {
350
 	}
373
 	}
351
 	return ""
374
 	return ""
352
 }
375
 }
376
+
377
// argAfterN returns the value immediately following the n-th occurrence
// (0-based) of flag in args. It returns "" when flag occurs fewer than n+1
// times, or when the matched occurrence is the final element and therefore
// has no value after it.
func argAfterN(args []string, flag string, n int) string {
	remaining := n
	for i := 0; i+1 < len(args); i++ {
		if args[i] != flag {
			continue
		}
		if remaining == 0 {
			return args[i+1]
		}
		remaining--
	}
	return ""
}
internal/runner/engine/types.gomodified
@@ -46,6 +46,7 @@ type Job struct {
46
 	If             string
46
 	If             string
47
 	TimeoutMinutes int32
47
 	TimeoutMinutes int32
48
 	Permissions    json.RawMessage
48
 	Permissions    json.RawMessage
49
+	Secrets        map[string]string
49
 	Env            map[string]string
50
 	Env            map[string]string
50
 	Steps          []Step
51
 	Steps          []Step
51
 	WorkspaceDir   string
52
 	WorkspaceDir   string
internal/runner/runner.gomodified
@@ -353,13 +353,26 @@ func toEngineJob(job api.Job, workspaceDir, defaultImage string) engine.Job {
353
 		If:             job.If,
353
 		If:             job.If,
354
 		TimeoutMinutes: job.TimeoutMinutes,
354
 		TimeoutMinutes: job.TimeoutMinutes,
355
 		Permissions:    job.Permissions,
355
 		Permissions:    job.Permissions,
356
+		Secrets:        cloneStringMap(job.Secrets),
356
 		Env:            job.Env,
357
 		Env:            job.Env,
357
 		Steps:          steps,
358
 		Steps:          steps,
358
 		WorkspaceDir:   workspaceDir,
359
 		WorkspaceDir:   workspaceDir,
359
 		Image:          defaultImage,
360
 		Image:          defaultImage,
361
+		MaskValues:     append([]string{}, job.MaskValues...),
360
 	}
362
 	}
361
 }
363
 }
362
 
364
 
365
// cloneStringMap returns a shallow copy of in so the engine job cannot share
// (or later observe mutations of) the caller's map. Nil and empty maps both
// normalize to a nil result.
func cloneStringMap(in map[string]string) map[string]string {
	if len(in) > 0 {
		out := make(map[string]string, len(in))
		for key, value := range in {
			out[key] = value
		}
		return out
	}
	return nil
}
375
+
363
 func defaultSleep(ctx context.Context, d time.Duration) error {
376
 func defaultSleep(ctx context.Context, d time.Duration) error {
364
 	timer := time.NewTimer(d)
377
 	timer := time.NewTimer(d)
365
 	defer timer.Stop()
378
 	defer timer.Stop()
internal/runner/scrub/scrub.gomodified
@@ -11,9 +11,10 @@ import (
11
 const Mask = "***"
11
 const Mask = "***"
12
 
12
 
13
 type Scrubber struct {
13
 type Scrubber struct {
14
-	values   []string
14
+	values       []string
15
-	replacer *strings.Replacer
15
+	replacer     *strings.Replacer
16
-	tail     string
16
+	tail         string
17
+	replacements uint64
17
 }
18
 }
18
 
19
 
19
 func New(values []string) *Scrubber {
20
 func New(values []string) *Scrubber {
@@ -40,6 +41,7 @@ func (s *Scrubber) Scrub(chunk []byte) []byte {
40
 	}
41
 	}
41
 	emit := combined[:len(combined)-keep]
42
 	emit := combined[:len(combined)-keep]
42
 	s.tail = combined[len(combined)-keep:]
43
 	s.tail = combined[len(combined)-keep:]
44
+	s.replacements += countReplacements(emit, s.values)
43
 	return []byte(s.replacer.Replace(emit))
45
 	return []byte(s.replacer.Replace(emit))
44
 }
46
 }
45
 
47
 
@@ -52,9 +54,17 @@ func (s *Scrubber) Flush() []byte {
52
 	if s.replacer == nil {
54
 	if s.replacer == nil {
53
 		return []byte(tail)
55
 		return []byte(tail)
54
 	}
56
 	}
57
+	s.replacements += countReplacements(tail, s.values)
55
 	return []byte(s.replacer.Replace(tail))
58
 	return []byte(s.replacer.Replace(tail))
56
 }
59
 }
57
 
60
 
61
+func (s *Scrubber) Replacements() uint64 {
62
+	if s == nil {
63
+		return 0
64
+	}
65
+	return s.replacements
66
+}
67
+
58
 func normalize(values []string) []string {
68
 func normalize(values []string) []string {
59
 	seen := map[string]struct{}{}
69
 	seen := map[string]struct{}{}
60
 	out := make([]string, 0, len(values))
70
 	out := make([]string, 0, len(values))
@@ -74,6 +84,31 @@ func normalize(values []string) []string {
74
 	return out
84
 	return out
75
 }
85
 }
76
 
86
 
87
// countReplacements reports how many occurrences of any value in values
// appear in input, scanning left to right without overlap. When several
// values match at the same position, the longest one wins, mirroring how the
// scrubber's replacer consumes input. An empty values list yields 0.
//
// NOTE(review): assumes every value is non-empty (an empty value matching at
// position 0 would loop forever, in the original as well) — presumably
// normalize() filters blanks; confirm.
func countReplacements(input string, values []string) uint64 {
	var total uint64
	for remaining := input; remaining != ""; {
		matchAt, matchLen := -1, 0
		for _, v := range values {
			idx := strings.Index(remaining, v)
			switch {
			case idx < 0:
				continue
			case matchAt == -1, idx < matchAt, idx == matchAt && len(v) > matchLen:
				matchAt, matchLen = idx, len(v)
			}
		}
		if matchAt == -1 {
			break
		}
		total++
		remaining = remaining[matchAt+matchLen:]
	}
	return total
}
111
+
77
 func (s *Scrubber) pendingSuffixLen(combined string) int {
112
 func (s *Scrubber) pendingSuffixLen(combined string) int {
78
 	keep := 0
113
 	keep := 0
79
 	for _, secret := range s.values {
114
 	for _, secret := range s.values {
internal/runner/scrub/scrub_test.gomodified
@@ -12,6 +12,9 @@ func TestScrubber_MasksPlainAndMultilineSecrets(t *testing.T) {
12
 	if got != want {
12
 	if got != want {
13
 		t.Fatalf("scrubbed:\ngot  %q\nwant %q", got, want)
13
 		t.Fatalf("scrubbed:\ngot  %q\nwant %q", got, want)
14
 	}
14
 	}
15
+	if s.Replacements() != 2 {
16
+		t.Fatalf("replacements: got %d, want 2", s.Replacements())
17
+	}
15
 }
18
 }
16
 
19
 
17
 func TestScrubber_MasksAcrossChunkBoundary(t *testing.T) {
20
 func TestScrubber_MasksAcrossChunkBoundary(t *testing.T) {
@@ -24,6 +27,9 @@ func TestScrubber_MasksAcrossChunkBoundary(t *testing.T) {
24
 	if got != want {
27
 	if got != want {
25
 		t.Fatalf("scrubbed:\ngot  %q\nwant %q", got, want)
28
 		t.Fatalf("scrubbed:\ngot  %q\nwant %q", got, want)
26
 	}
29
 	}
30
+	if s.Replacements() != 1 {
31
+		t.Fatalf("replacements: got %d, want 1", s.Replacements())
32
+	}
27
 }
33
 }
28
 
34
 
29
 func TestScrubber_NoSecretsIsCopyingNoop(t *testing.T) {
35
 func TestScrubber_NoSecretsIsCopyingNoop(t *testing.T) {
internal/web/auth_wiring.gomodified
@@ -41,6 +41,7 @@ func buildAPIHandlers(
41
 	pool *pgxpool.Pool,
41
 	pool *pgxpool.Pool,
42
 	objectStore storage.ObjectStore,
42
 	objectStore storage.ObjectStore,
43
 	runnerJWT *runnerjwt.Signer,
43
 	runnerJWT *runnerjwt.Signer,
44
+	secretBox *secretbox.Box,
44
 	rateLimiter *ratelimit.Limiter,
45
 	rateLimiter *ratelimit.Limiter,
45
 	logger *slog.Logger,
46
 	logger *slog.Logger,
46
 ) (*apih.Handlers, error) {
47
 ) (*apih.Handlers, error) {
@@ -50,6 +51,7 @@ func buildAPIHandlers(
50
 		Logger:      logger,
51
 		Logger:      logger,
51
 		ObjectStore: objectStore,
52
 		ObjectStore: objectStore,
52
 		RunnerJWT:   runnerJWT,
53
 		RunnerJWT:   runnerJWT,
54
+		SecretBox:   secretBox,
53
 		RateLimiter: rateLimiter,
55
 		RateLimiter: rateLimiter,
54
 	})
56
 	})
55
 }
57
 }
internal/web/handlers/api/api.gomodified
@@ -20,6 +20,7 @@ import (
20
 
20
 
21
 	"github.com/tenseleyFlow/shithub/internal/auth/pat"
21
 	"github.com/tenseleyFlow/shithub/internal/auth/pat"
22
 	"github.com/tenseleyFlow/shithub/internal/auth/runnerjwt"
22
 	"github.com/tenseleyFlow/shithub/internal/auth/runnerjwt"
23
+	"github.com/tenseleyFlow/shithub/internal/auth/secretbox"
23
 	"github.com/tenseleyFlow/shithub/internal/infra/storage"
24
 	"github.com/tenseleyFlow/shithub/internal/infra/storage"
24
 	"github.com/tenseleyFlow/shithub/internal/ratelimit"
25
 	"github.com/tenseleyFlow/shithub/internal/ratelimit"
25
 	usersdb "github.com/tenseleyFlow/shithub/internal/users/sqlc"
26
 	usersdb "github.com/tenseleyFlow/shithub/internal/users/sqlc"
@@ -34,6 +35,7 @@ type Deps struct {
34
 	Logger      *slog.Logger
35
 	Logger      *slog.Logger
35
 	ObjectStore storage.ObjectStore
36
 	ObjectStore storage.ObjectStore
36
 	RunnerJWT   *runnerjwt.Signer
37
 	RunnerJWT   *runnerjwt.Signer
38
+	SecretBox   *secretbox.Box
37
 	RateLimiter *ratelimit.Limiter
39
 	RateLimiter *ratelimit.Limiter
38
 }
40
 }
39
 
41
 
internal/web/handlers/api/runners.gomodified
@@ -11,6 +11,7 @@ import (
11
 	"io"
11
 	"io"
12
 	"net/http"
12
 	"net/http"
13
 	"regexp"
13
 	"regexp"
14
+	"sort"
14
 	"strconv"
15
 	"strconv"
15
 	"strings"
16
 	"strings"
16
 	"time"
17
 	"time"
@@ -22,12 +23,15 @@ import (
22
 	"github.com/tenseleyFlow/shithub/internal/actions/finalize"
23
 	"github.com/tenseleyFlow/shithub/internal/actions/finalize"
23
 	"github.com/tenseleyFlow/shithub/internal/actions/runnerlabels"
24
 	"github.com/tenseleyFlow/shithub/internal/actions/runnerlabels"
24
 	"github.com/tenseleyFlow/shithub/internal/actions/runnertoken"
25
 	"github.com/tenseleyFlow/shithub/internal/actions/runnertoken"
26
+	"github.com/tenseleyFlow/shithub/internal/actions/secrets"
25
 	actionsdb "github.com/tenseleyFlow/shithub/internal/actions/sqlc"
27
 	actionsdb "github.com/tenseleyFlow/shithub/internal/actions/sqlc"
26
 	"github.com/tenseleyFlow/shithub/internal/auth/runnerjwt"
28
 	"github.com/tenseleyFlow/shithub/internal/auth/runnerjwt"
27
 	"github.com/tenseleyFlow/shithub/internal/checks"
29
 	"github.com/tenseleyFlow/shithub/internal/checks"
28
 	checksdb "github.com/tenseleyFlow/shithub/internal/checks/sqlc"
30
 	checksdb "github.com/tenseleyFlow/shithub/internal/checks/sqlc"
29
 	"github.com/tenseleyFlow/shithub/internal/infra/metrics"
31
 	"github.com/tenseleyFlow/shithub/internal/infra/metrics"
30
 	"github.com/tenseleyFlow/shithub/internal/ratelimit"
32
 	"github.com/tenseleyFlow/shithub/internal/ratelimit"
33
+	reposdb "github.com/tenseleyFlow/shithub/internal/repos/sqlc"
34
+	"github.com/tenseleyFlow/shithub/internal/runner/scrub"
31
 	"github.com/tenseleyFlow/shithub/internal/worker"
35
 	"github.com/tenseleyFlow/shithub/internal/worker"
32
 )
36
 )
33
 
37
 
@@ -114,7 +118,13 @@ func (h *Handlers) runnerHeartbeat(w http.ResponseWriter, r *http.Request) {
114
 	}
118
 	}
115
 	metrics.ActionsRunnerHeartbeatsTotal.WithLabelValues("claimed").Inc()
119
 	metrics.ActionsRunnerHeartbeatsTotal.WithLabelValues("claimed").Inc()
116
 	metrics.ActionsRunnerJWTTotal.WithLabelValues("issued").Inc()
120
 	metrics.ActionsRunnerJWTTotal.WithLabelValues("issued").Inc()
117
-	writeJSON(w, http.StatusOK, presentRunnerClaim(job, steps, token, time.Unix(claims.Exp, 0)))
121
+	resolvedSecrets, err := h.resolveVisibleSecrets(r.Context(), job.RepoID)
122
+	if err != nil {
123
+		h.d.Logger.ErrorContext(r.Context(), "runner secret resolution failed", "repo_id", job.RepoID, "job_id", job.ID, "error", err)
124
+		writeAPIError(w, http.StatusInternalServerError, "runner secret resolution failed")
125
+		return
126
+	}
127
+	writeJSON(w, http.StatusOK, presentRunnerClaim(job, steps, resolvedSecrets, token, time.Unix(claims.Exp, 0)))
118
 }
128
 }
119
 
129
 
120
 func (h *Handlers) authenticateRunner(w http.ResponseWriter, r *http.Request) (actionsdb.GetRunnerByTokenHashRow, bool) {
130
 func (h *Handlers) authenticateRunner(w http.ResponseWriter, r *http.Request) (actionsdb.GetRunnerByTokenHashRow, bool) {
@@ -337,15 +347,17 @@ func (h *Handlers) runnerJobLogs(w http.ResponseWriter, r *http.Request) {
337
 		writeAPIError(w, http.StatusBadRequest, "chunk must be between 1 and 524288 bytes")
347
 		writeAPIError(w, http.StatusBadRequest, "chunk must be between 1 and 524288 bytes")
338
 		return
348
 		return
339
 	}
349
 	}
350
+	values, err := h.logMaskValues(r.Context(), auth.Claims.RepoID)
351
+	if err != nil {
352
+		h.d.Logger.ErrorContext(r.Context(), "runner log mask resolution failed", "repo_id", auth.Claims.RepoID, "job_id", auth.Claims.JobID, "error", err)
353
+		writeAPIError(w, http.StatusInternalServerError, "log mask resolution failed")
354
+		return
355
+	}
340
 	stepID, ok := h.resolveLogStep(w, r, auth.Job.ID, body.StepID)
356
 	stepID, ok := h.resolveLogStep(w, r, auth.Job.ID, body.StepID)
341
 	if !ok {
357
 	if !ok {
342
 		return
358
 		return
343
 	}
359
 	}
344
-	if _, err := actionsdb.New().AppendStepLogChunk(r.Context(), h.d.Pool, actionsdb.AppendStepLogChunkParams{
360
+	if err := h.appendScrubbedLogChunk(r.Context(), stepID, body.Seq, chunk, values); err != nil {
345
-		StepID: stepID,
346
-		Seq:    body.Seq,
347
-		Chunk:  chunk,
348
-	}); err != nil && !errors.Is(err, pgx.ErrNoRows) {
349
 		writeAPIError(w, http.StatusInternalServerError, "append log failed")
361
 		writeAPIError(w, http.StatusInternalServerError, "append log failed")
350
 		return
362
 		return
351
 	}
363
 	}
@@ -853,6 +865,189 @@ func (h *Handlers) runnerJobCancelCheck(w http.ResponseWriter, r *http.Request)
853
 	})
865
 	})
854
 }
866
 }
855
 
867
 
868
+func (h *Handlers) resolveVisibleSecrets(ctx context.Context, repoID int64) (map[string]string, error) {
869
+	if h.d.SecretBox == nil {
870
+		return nil, nil
871
+	}
872
+	repo, err := reposdb.New().GetRepoByID(ctx, h.d.Pool, repoID)
873
+	if err != nil {
874
+		return nil, err
875
+	}
876
+	store := secrets.Deps{Pool: h.d.Pool, Box: h.d.SecretBox, Logger: h.d.Logger}
877
+	out := map[string]string{}
878
+	if repo.OwnerOrgID.Valid {
879
+		if err := h.mergeSecrets(ctx, store, secrets.OrgScope(repo.OwnerOrgID.Int64), out); err != nil {
880
+			return nil, err
881
+		}
882
+	}
883
+	if err := h.mergeSecrets(ctx, store, secrets.RepoScope(repo.ID), out); err != nil {
884
+		return nil, err
885
+	}
886
+	if len(out) == 0 {
887
+		return nil, nil
888
+	}
889
+	return out, nil
890
+}
891
+
892
+func (h *Handlers) mergeSecrets(ctx context.Context, store secrets.Deps, scope secrets.Scope, out map[string]string) error {
893
+	items, err := store.List(ctx, scope)
894
+	if err != nil {
895
+		return err
896
+	}
897
+	for _, item := range items {
898
+		plaintext, err := store.Get(ctx, scope, item.Name)
899
+		if err != nil {
900
+			return err
901
+		}
902
+		out[item.Name] = string(plaintext)
903
+	}
904
+	return nil
905
+}
906
+
907
+func (h *Handlers) logMaskValues(ctx context.Context, repoID int64) ([]string, error) {
908
+	resolved, err := h.resolveVisibleSecrets(ctx, repoID)
909
+	if err != nil {
910
+		return nil, err
911
+	}
912
+	return secretMaskValues(resolved), nil
913
+}
914
+
915
+func secretMaskValues(resolved map[string]string) []string {
916
+	if len(resolved) == 0 {
917
+		return nil
918
+	}
919
+	values := make([]string, 0, len(resolved))
920
+	for _, value := range resolved {
921
+		values = append(values, value)
922
+	}
923
+	sort.Strings(values)
924
+	return values
925
+}
926
+
927
+func cloneStringMap(in map[string]string) map[string]string {
928
+	if len(in) == 0 {
929
+		return nil
930
+	}
931
+	out := make(map[string]string, len(in))
932
+	for k, v := range in {
933
+		out[k] = v
934
+	}
935
+	return out
936
+}
937
+
938
+func (h *Handlers) appendScrubbedLogChunk(ctx context.Context, stepID int64, seq int32, chunk []byte, values []string) error {
939
+	q := actionsdb.New()
940
+	if len(values) == 0 {
941
+		_, err := q.AppendStepLogChunk(ctx, h.d.Pool, actionsdb.AppendStepLogChunkParams{
942
+			StepID: stepID,
943
+			Seq:    seq,
944
+			Chunk:  chunk,
945
+		})
946
+		if errors.Is(err, pgx.ErrNoRows) {
947
+			return nil
948
+		}
949
+		return err
950
+	}
951
+
952
+	tx, err := h.d.Pool.Begin(ctx)
953
+	if err != nil {
954
+		return err
955
+	}
956
+	committed := false
957
+	defer func() {
958
+		if !committed {
959
+			_ = tx.Rollback(ctx)
960
+		}
961
+	}()
962
+
963
+	if _, err := q.GetStepLogChunkByStepSeq(ctx, tx, actionsdb.GetStepLogChunkByStepSeqParams{
964
+		StepID: stepID,
965
+		Seq:    seq,
966
+	}); err == nil {
967
+		if err := tx.Commit(ctx); err != nil {
968
+			return err
969
+		}
970
+		committed = true
971
+		return nil
972
+	} else if !errors.Is(err, pgx.ErrNoRows) {
973
+		return err
974
+	}
975
+
976
+	var replacements uint64
977
+	prev, err := q.GetStepLogChunkBefore(ctx, tx, actionsdb.GetStepLogChunkBeforeParams{
978
+		StepID: stepID,
979
+		Seq:    seq,
980
+	})
981
+	if err == nil {
982
+		if carry := scrubCarryLen(prev.Chunk, values); carry > 0 {
983
+			prefix := append([]byte(nil), prev.Chunk[:len(prev.Chunk)-carry]...)
984
+			combined := append(append([]byte(nil), prev.Chunk[len(prev.Chunk)-carry:]...), chunk...)
985
+			chunk, replacements = scrubChunk(combined, values)
986
+			if err := q.UpdateStepLogChunk(ctx, tx, actionsdb.UpdateStepLogChunkParams{
987
+				ID:    prev.ID,
988
+				Chunk: prefix,
989
+			}); err != nil {
990
+				return err
991
+			}
992
+		} else {
993
+			chunk, replacements = scrubChunk(chunk, values)
994
+		}
995
+	} else if errors.Is(err, pgx.ErrNoRows) {
996
+		chunk, replacements = scrubChunk(chunk, values)
997
+	} else {
998
+		return err
999
+	}
1000
+
1001
+	if _, err := q.AppendStepLogChunk(ctx, tx, actionsdb.AppendStepLogChunkParams{
1002
+		StepID: stepID,
1003
+		Seq:    seq,
1004
+		Chunk:  chunk,
1005
+	}); err != nil && !errors.Is(err, pgx.ErrNoRows) {
1006
+		return err
1007
+	}
1008
+	if err := tx.Commit(ctx); err != nil {
1009
+		return err
1010
+	}
1011
+	committed = true
1012
+	if replacements > 0 {
1013
+		metrics.ActionsLogScrubReplacementsTotal.WithLabelValues("server").Add(float64(replacements))
1014
+	}
1015
+	return nil
1016
+}
1017
+
1018
+func scrubChunk(chunk []byte, values []string) ([]byte, uint64) {
1019
+	if len(values) == 0 {
1020
+		return chunk, 0
1021
+	}
1022
+	s := scrub.New(values)
1023
+	out := s.Scrub(chunk)
1024
+	return append(out, s.Flush()...), s.Replacements()
1025
+}
1026
+
1027
+func scrubCarryLen(chunk []byte, values []string) int {
1028
+	if len(chunk) == 0 || len(values) == 0 {
1029
+		return 0
1030
+	}
1031
+	text := string(chunk)
1032
+	keep := 0
1033
+	for _, value := range values {
1034
+		if value == "" {
1035
+			continue
1036
+		}
1037
+		max := len(value) - 1
1038
+		if max > len(text) {
1039
+			max = len(text)
1040
+		}
1041
+		for n := max; n > keep; n-- {
1042
+			if strings.HasSuffix(text, value[:n]) {
1043
+				keep = n
1044
+				break
1045
+			}
1046
+		}
1047
+	}
1048
+	return keep
1049
+}
1050
+
856
 func (h *Handlers) writeNextTokenResponse(
1051
 func (h *Handlers) writeNextTokenResponse(
857
 	w http.ResponseWriter,
1052
 	w http.ResponseWriter,
858
 	r *http.Request,
1053
 	r *http.Request,
@@ -884,25 +1079,27 @@ type runnerClaimResponse struct {
884
 }
1079
 }
885
 
1080
 
886
 type runnerJobPayload struct {
1081
 type runnerJobPayload struct {
887
-	ID             int64           `json:"id"`
1082
+	ID             int64             `json:"id"`
888
-	RunID          int64           `json:"run_id"`
1083
+	RunID          int64             `json:"run_id"`
889
-	RepoID         int64           `json:"repo_id"`
1084
+	RepoID         int64             `json:"repo_id"`
890
-	RunIndex       int64           `json:"run_index"`
1085
+	RunIndex       int64             `json:"run_index"`
891
-	WorkflowFile   string          `json:"workflow_file"`
1086
+	WorkflowFile   string            `json:"workflow_file"`
892
-	WorkflowName   string          `json:"workflow_name"`
1087
+	WorkflowName   string            `json:"workflow_name"`
893
-	HeadSHA        string          `json:"head_sha"`
1088
+	HeadSHA        string            `json:"head_sha"`
894
-	HeadRef        string          `json:"head_ref"`
1089
+	HeadRef        string            `json:"head_ref"`
895
-	Event          string          `json:"event"`
1090
+	Event          string            `json:"event"`
896
-	EventPayload   json.RawMessage `json:"event_payload"`
1091
+	EventPayload   json.RawMessage   `json:"event_payload"`
897
-	JobKey         string          `json:"job_key"`
1092
+	JobKey         string            `json:"job_key"`
898
-	JobName        string          `json:"job_name"`
1093
+	JobName        string            `json:"job_name"`
899
-	RunsOn         string          `json:"runs_on"`
1094
+	RunsOn         string            `json:"runs_on"`
900
-	Needs          []string        `json:"needs"`
1095
+	Needs          []string          `json:"needs"`
901
-	If             string          `json:"if"`
1096
+	If             string            `json:"if"`
902
-	TimeoutMinutes int32           `json:"timeout_minutes"`
1097
+	TimeoutMinutes int32             `json:"timeout_minutes"`
903
-	Permissions    json.RawMessage `json:"permissions"`
1098
+	Permissions    json.RawMessage   `json:"permissions"`
904
-	Env            json.RawMessage `json:"env"`
1099
+	Secrets        map[string]string `json:"secrets"`
905
-	Steps          []runnerStep    `json:"steps"`
1100
+	MaskValues     []string          `json:"mask_values"`
1101
+	Env            json.RawMessage   `json:"env"`
1102
+	Steps          []runnerStep      `json:"steps"`
906
 }
1103
 }
907
 
1104
 
908
 type runnerStep struct {
1105
 type runnerStep struct {
@@ -922,6 +1119,7 @@ type runnerStep struct {
922
 func presentRunnerClaim(
1119
 func presentRunnerClaim(
923
 	job actionsdb.ClaimQueuedWorkflowJobRow,
1120
 	job actionsdb.ClaimQueuedWorkflowJobRow,
924
 	steps []actionsdb.ListRunnerStepsForJobRow,
1121
 	steps []actionsdb.ListRunnerStepsForJobRow,
1122
+	resolvedSecrets map[string]string,
925
 	token string,
1123
 	token string,
926
 	expiresAt time.Time,
1124
 	expiresAt time.Time,
927
 ) runnerClaimResponse {
1125
 ) runnerClaimResponse {
@@ -962,6 +1160,8 @@ func presentRunnerClaim(
962
 			If:             job.IfExpr,
1160
 			If:             job.IfExpr,
963
 			TimeoutMinutes: job.TimeoutMinutes,
1161
 			TimeoutMinutes: job.TimeoutMinutes,
964
 			Permissions:    rawJSONOrObject(job.Permissions),
1162
 			Permissions:    rawJSONOrObject(job.Permissions),
1163
+			Secrets:        cloneStringMap(resolvedSecrets),
1164
+			MaskValues:     secretMaskValues(resolvedSecrets),
965
 			Env:            rawJSONOrObject(job.JobEnv),
1165
 			Env:            rawJSONOrObject(job.JobEnv),
966
 			Steps:          outSteps,
1166
 			Steps:          outSteps,
967
 		},
1167
 		},
internal/web/handlers/api/runners_test.gomodified
@@ -22,10 +22,12 @@ import (
22
 
22
 
23
 	"github.com/tenseleyFlow/shithub/internal/actions/finalize"
23
 	"github.com/tenseleyFlow/shithub/internal/actions/finalize"
24
 	"github.com/tenseleyFlow/shithub/internal/actions/runnertoken"
24
 	"github.com/tenseleyFlow/shithub/internal/actions/runnertoken"
25
+	actionsecrets "github.com/tenseleyFlow/shithub/internal/actions/secrets"
25
 	actionsdb "github.com/tenseleyFlow/shithub/internal/actions/sqlc"
26
 	actionsdb "github.com/tenseleyFlow/shithub/internal/actions/sqlc"
26
 	"github.com/tenseleyFlow/shithub/internal/actions/trigger"
27
 	"github.com/tenseleyFlow/shithub/internal/actions/trigger"
27
 	"github.com/tenseleyFlow/shithub/internal/actions/workflow"
28
 	"github.com/tenseleyFlow/shithub/internal/actions/workflow"
28
 	"github.com/tenseleyFlow/shithub/internal/auth/runnerjwt"
29
 	"github.com/tenseleyFlow/shithub/internal/auth/runnerjwt"
30
+	"github.com/tenseleyFlow/shithub/internal/auth/secretbox"
29
 	"github.com/tenseleyFlow/shithub/internal/infra/storage"
31
 	"github.com/tenseleyFlow/shithub/internal/infra/storage"
30
 	reposdb "github.com/tenseleyFlow/shithub/internal/repos/sqlc"
32
 	reposdb "github.com/tenseleyFlow/shithub/internal/repos/sqlc"
31
 	"github.com/tenseleyFlow/shithub/internal/testing/dbtest"
33
 	"github.com/tenseleyFlow/shithub/internal/testing/dbtest"
@@ -164,6 +166,133 @@ func TestRunnerHeartbeatClaimsQueuedJob(t *testing.T) {
164
 	}
166
 	}
165
 }
167
 }
166
 
168
 
169
+func TestRunnerSecretsAreClaimedAndServerScrubsLogs(t *testing.T) {
170
+	ctx := context.Background()
171
+	pool := dbtest.NewTestDB(t)
172
+	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
173
+	repoID, userID := setupRunnerAPIRepo(t, pool)
174
+	runID := enqueueRunnerAPIRun(t, pool, logger, repoID, userID)
175
+	box := testRunnerAPISecretBox(t)
176
+	if err := (actionsecrets.Deps{Pool: pool, Box: box}).Set(ctx, actionsecrets.RepoScope(repoID), "TOKEN", []byte("hunter2"), userID); err != nil {
177
+		t.Fatalf("Set secret: %v", err)
178
+	}
179
+
180
+	token, _ := registerRunnerForTest(t, pool, []string{"ubuntu-latest", "linux"}, 1)
181
+	signer := runnerAPISigner(t, time.Date(2026, 5, 10, 12, 0, 0, 0, time.UTC))
182
+	router := newRunnerAPIRouterWithSecretBox(t, pool, logger, signer, box)
183
+
184
+	req := httptest.NewRequest(http.MethodPost, "/api/v1/runners/heartbeat",
185
+		strings.NewReader(`{"labels":["ubuntu-latest","linux"],"capacity":1}`))
186
+	req.Header.Set("Authorization", "Bearer "+token)
187
+	rr := httptest.NewRecorder()
188
+	router.ServeHTTP(rr, req)
189
+	if rr.Code != http.StatusOK {
190
+		t.Fatalf("heartbeat status: got %d, want 200; body=%s", rr.Code, rr.Body.String())
191
+	}
192
+	var claim struct {
193
+		Token string `json:"token"`
194
+		Job   struct {
195
+			ID         int64             `json:"id"`
196
+			RunID      int64             `json:"run_id"`
197
+			Secrets    map[string]string `json:"secrets"`
198
+			MaskValues []string          `json:"mask_values"`
199
+		} `json:"job"`
200
+	}
201
+	if err := json.Unmarshal(rr.Body.Bytes(), &claim); err != nil {
202
+		t.Fatalf("decode claim: %v", err)
203
+	}
204
+	if claim.Job.RunID != runID || claim.Job.Secrets["TOKEN"] != "hunter2" || !containsString(claim.Job.MaskValues, "hunter2") {
205
+		t.Fatalf("claim did not include masked secret context: %+v", claim.Job)
206
+	}
207
+
208
+	rawLog := []byte("before hunter2 after\n")
209
+	logBody := fmt.Sprintf(`{"seq":0,"chunk":%q}`, base64.StdEncoding.EncodeToString(rawLog))
210
+	req = httptest.NewRequest(http.MethodPost, fmt.Sprintf("/api/v1/jobs/%d/logs", claim.Job.ID), strings.NewReader(logBody))
211
+	req.Header.Set("Authorization", "Bearer "+claim.Token)
212
+	rr = httptest.NewRecorder()
213
+	router.ServeHTTP(rr, req)
214
+	if rr.Code != http.StatusAccepted {
215
+		t.Fatalf("logs status: got %d, want 202; body=%s", rr.Code, rr.Body.String())
216
+	}
217
+	step, err := actionsdb.New().GetFirstStepForJob(ctx, pool, claim.Job.ID)
218
+	if err != nil {
219
+		t.Fatalf("GetFirstStepForJob: %v", err)
220
+	}
221
+	chunks, err := actionsdb.New().ListStepLogChunks(ctx, pool, actionsdb.ListStepLogChunksParams{
222
+		StepID: step.ID,
223
+		Seq:    -1,
224
+		Limit:  10,
225
+	})
226
+	if err != nil {
227
+		t.Fatalf("ListStepLogChunks: %v", err)
228
+	}
229
+	if len(chunks) != 1 {
230
+		t.Fatalf("chunks: %#v", chunks)
231
+	}
232
+	got := string(chunks[0].Chunk)
233
+	if strings.Contains(got, "hunter2") || got != "before *** after\n" {
234
+		t.Fatalf("stored log chunk was not scrubbed: %q", got)
235
+	}
236
+}
237
+
238
+func TestRunnerServerScrubsSecretSplitAcrossLogPosts(t *testing.T) {
239
+	ctx := context.Background()
240
+	pool := dbtest.NewTestDB(t)
241
+	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
242
+	repoID, userID := setupRunnerAPIRepo(t, pool)
243
+	enqueueRunnerAPIRun(t, pool, logger, repoID, userID)
244
+	box := testRunnerAPISecretBox(t)
245
+	if err := (actionsecrets.Deps{Pool: pool, Box: box}).Set(ctx, actionsecrets.RepoScope(repoID), "TOKEN", []byte("hunter2"), userID); err != nil {
246
+		t.Fatalf("Set secret: %v", err)
247
+	}
248
+
249
+	token, _ := registerRunnerForTest(t, pool, []string{"ubuntu-latest", "linux"}, 1)
250
+	signer := runnerAPISigner(t, time.Date(2026, 5, 10, 12, 0, 0, 0, time.UTC))
251
+	router := newRunnerAPIRouterWithSecretBox(t, pool, logger, signer, box)
252
+
253
+	req := httptest.NewRequest(http.MethodPost, "/api/v1/runners/heartbeat",
254
+		strings.NewReader(`{"labels":["ubuntu-latest","linux"],"capacity":1}`))
255
+	req.Header.Set("Authorization", "Bearer "+token)
256
+	rr := httptest.NewRecorder()
257
+	router.ServeHTTP(rr, req)
258
+	if rr.Code != http.StatusOK {
259
+		t.Fatalf("heartbeat status: got %d, want 200; body=%s", rr.Code, rr.Body.String())
260
+	}
261
+	var claim struct {
262
+		Token string `json:"token"`
263
+		Job   struct {
264
+			ID int64 `json:"id"`
265
+		} `json:"job"`
266
+	}
267
+	if err := json.Unmarshal(rr.Body.Bytes(), &claim); err != nil {
268
+		t.Fatalf("decode claim: %v", err)
269
+	}
270
+
271
+	next := postRunnerLogChunk(t, router, claim.Job.ID, claim.Token, 0, []byte("before hun"))
272
+	next = postRunnerLogChunk(t, router, claim.Job.ID, next, 1, []byte("ter2 after\n"))
273
+
274
+	step, err := actionsdb.New().GetFirstStepForJob(ctx, pool, claim.Job.ID)
275
+	if err != nil {
276
+		t.Fatalf("GetFirstStepForJob: %v", err)
277
+	}
278
+	chunks, err := actionsdb.New().ListStepLogChunks(ctx, pool, actionsdb.ListStepLogChunksParams{
279
+		StepID: step.ID,
280
+		Seq:    -1,
281
+		Limit:  10,
282
+	})
283
+	if err != nil {
284
+		t.Fatalf("ListStepLogChunks: %v", err)
285
+	}
286
+	var combined strings.Builder
287
+	for _, chunk := range chunks {
288
+		combined.Write(chunk.Chunk)
289
+	}
290
+	got := combined.String()
291
+	if strings.Contains(got, "hunter2") || got != "before *** after\n" {
292
+		t.Fatalf("stored log chunks were not scrubbed across boundary: chunks=%#v combined=%q next=%q", chunks, got, next)
293
+	}
294
+}
295
+
167
 func TestRunnerHeartbeatRejectsBadToken(t *testing.T) {
296
 func TestRunnerHeartbeatRejectsBadToken(t *testing.T) {
168
 	pool := dbtest.NewTestDB(t)
297
 	pool := dbtest.NewTestDB(t)
169
 	router := newRunnerAPIRouter(t, pool, slog.New(slog.NewTextHandler(io.Discard, nil)), runnerAPISigner(t, time.Now()))
298
 	router := newRunnerAPIRouter(t, pool, slog.New(slog.NewTextHandler(io.Discard, nil)), runnerAPISigner(t, time.Now()))
@@ -311,6 +440,28 @@ func TestRunnerStepStatusEnqueuesFinalizeWorker(t *testing.T) {
311
 	}
440
 	}
312
 }
441
 }
313
 
442
 
443
+func postRunnerLogChunk(t *testing.T, router http.Handler, jobID int64, token string, seq int32, chunk []byte) string {
444
+	t.Helper()
445
+	body := fmt.Sprintf(`{"seq":%d,"chunk":%q}`, seq, base64.StdEncoding.EncodeToString(chunk))
446
+	req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("/api/v1/jobs/%d/logs", jobID), strings.NewReader(body))
447
+	req.Header.Set("Authorization", "Bearer "+token)
448
+	rr := httptest.NewRecorder()
449
+	router.ServeHTTP(rr, req)
450
+	if rr.Code != http.StatusAccepted {
451
+		t.Fatalf("logs status: got %d, want 202; body=%s", rr.Code, rr.Body.String())
452
+	}
453
+	var resp struct {
454
+		NextToken string `json:"next_token"`
455
+	}
456
+	if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil {
457
+		t.Fatalf("decode log response: %v", err)
458
+	}
459
+	if resp.NextToken == "" {
460
+		t.Fatalf("empty next token in log response: %s", rr.Body.String())
461
+	}
462
+	return resp.NextToken
463
+}
464
+
314
 func newRunnerAPIRouter(
465
 func newRunnerAPIRouter(
315
 	t *testing.T,
466
 	t *testing.T,
316
 	pool *pgxpool.Pool,
467
 	pool *pgxpool.Pool,
@@ -337,6 +488,41 @@ func newRunnerAPIRouter(
337
 	return r
488
 	return r
338
 }
489
 }
339
 
490
 
491
+func newRunnerAPIRouterWithSecretBox(
492
+	t *testing.T,
493
+	pool *pgxpool.Pool,
494
+	logger *slog.Logger,
495
+	signer *runnerjwt.Signer,
496
+	box *secretbox.Box,
497
+) http.Handler {
498
+	t.Helper()
499
+	h, err := apih.New(apih.Deps{
500
+		Pool:      pool,
501
+		Logger:    logger,
502
+		RunnerJWT: signer,
503
+		SecretBox: box,
504
+	})
505
+	if err != nil {
506
+		t.Fatalf("api.New: %v", err)
507
+	}
508
+	r := chi.NewRouter()
509
+	h.Mount(r)
510
+	return r
511
+}
512
+
513
+func testRunnerAPISecretBox(t *testing.T) *secretbox.Box {
514
+	t.Helper()
515
+	key, err := secretbox.GenerateKey()
516
+	if err != nil {
517
+		t.Fatalf("GenerateKey: %v", err)
518
+	}
519
+	box, err := secretbox.FromBytes(key)
520
+	if err != nil {
521
+		t.Fatalf("secretbox.FromBytes: %v", err)
522
+	}
523
+	return box
524
+}
525
+
340
 func setupRunnerAPIRepo(t *testing.T, pool *pgxpool.Pool) (repoID, userID int64) {
526
 func setupRunnerAPIRepo(t *testing.T, pool *pgxpool.Pool) (repoID, userID int64) {
341
 	t.Helper()
527
 	t.Helper()
342
 	ctx := context.Background()
528
 	ctx := context.Background()
@@ -420,6 +606,15 @@ func registerRunnerForTest(t *testing.T, pool *pgxpool.Pool, labels []string, ca
420
 	return token, runner.ID
606
 	return token, runner.ID
421
 }
607
 }
422
 
608
 
609
+func containsString(items []string, want string) bool {
610
+	for _, item := range items {
611
+		if item == want {
612
+			return true
613
+		}
614
+	}
615
+	return false
616
+}
617
+
423
 func runnerAPISigner(t *testing.T, now time.Time) *runnerjwt.Signer {
618
 func runnerAPISigner(t *testing.T, now time.Time) *runnerjwt.Signer {
424
 	t.Helper()
619
 	t.Helper()
425
 	signer, err := runnerjwt.NewFromKey(
620
 	signer, err := runnerjwt.NewFromKey(
internal/web/server.gomodified
@@ -24,6 +24,7 @@ import (
24
 	"golang.org/x/crypto/chacha20poly1305"
24
 	"golang.org/x/crypto/chacha20poly1305"
25
 
25
 
26
 	"github.com/tenseleyFlow/shithub/internal/auth/runnerjwt"
26
 	"github.com/tenseleyFlow/shithub/internal/auth/runnerjwt"
27
+	"github.com/tenseleyFlow/shithub/internal/auth/secretbox"
27
 	"github.com/tenseleyFlow/shithub/internal/auth/session"
28
 	"github.com/tenseleyFlow/shithub/internal/auth/session"
28
 	"github.com/tenseleyFlow/shithub/internal/infra/config"
29
 	"github.com/tenseleyFlow/shithub/internal/infra/config"
29
 	"github.com/tenseleyFlow/shithub/internal/infra/db"
30
 	"github.com/tenseleyFlow/shithub/internal/infra/db"
@@ -165,17 +166,24 @@ func Run(ctx context.Context, opts Options) error {
165
 		}
166
 		}
166
 		deps.AuthMounter = auth.Mount
167
 		deps.AuthMounter = auth.Mount
167
 
168
 
168
-		var runnerJWT *runnerjwt.Signer
169
+		var (
170
+			runnerJWT  *runnerjwt.Signer
171
+			actionsBox *secretbox.Box
172
+		)
169
 		if cfg.Auth.TOTPKeyB64 != "" {
173
 		if cfg.Auth.TOTPKeyB64 != "" {
170
 			runnerJWT, err = runnerjwt.NewFromTOTPKeyB64(cfg.Auth.TOTPKeyB64)
174
 			runnerJWT, err = runnerjwt.NewFromTOTPKeyB64(cfg.Auth.TOTPKeyB64)
171
 			if err != nil {
175
 			if err != nil {
172
 				return fmt.Errorf("runner jwt: %w", err)
176
 				return fmt.Errorf("runner jwt: %w", err)
173
 			}
177
 			}
178
+			actionsBox, err = secretbox.FromBase64(cfg.Auth.TOTPKeyB64)
179
+			if err != nil {
180
+				return fmt.Errorf("actions secretbox: %w", err)
181
+			}
174
 		} else {
182
 		} else {
175
 			logger.Warn("actions runner API disabled: auth.totp_key_b64 is not configured",
183
 			logger.Warn("actions runner API disabled: auth.totp_key_b64 is not configured",
176
 				"hint", "set SHITHUB_TOTP_KEY=$(openssl rand -base64 32) to enable runner job JWTs")
184
 				"hint", "set SHITHUB_TOTP_KEY=$(openssl rand -base64 32) to enable runner job JWTs")
177
 		}
185
 		}
178
-		api, err := buildAPIHandlers(pool, objectStore, runnerJWT, ratelimit.New(pool), logger)
186
+		api, err := buildAPIHandlers(pool, objectStore, runnerJWT, actionsBox, ratelimit.New(pool), logger)
179
 		if err != nil {
187
 		if err != nil {
180
 			return fmt.Errorf("api handlers: %w", err)
188
 			return fmt.Errorf("api handlers: %w", err)
181
 		}
189
 		}