// NOTE(review): web-scrape header ("Go · 2683 bytes Raw Blame History") converted to a comment.
1 // SPDX-License-Identifier: AGPL-3.0-or-later
2
3 package main
4
5 import (
6 "context"
7 "errors"
8 "fmt"
9 "time"
10
11 "github.com/spf13/cobra"
12
13 "github.com/tenseleyFlow/shithub/internal/infra/config"
14 "github.com/tenseleyFlow/shithub/internal/infra/db"
15 "github.com/tenseleyFlow/shithub/internal/repos/sigverify"
16 )
17
18 // gpgBackfillAllCmd is the operator-facing entrypoint that fans out
19 // one gpg:backfill worker job per active repo. Typical usage: run
20 // once at S51 first-deploy so existing signed commits and tags get
21 // commit_verification_cache rows stamped, after which "Verified"
22 // badges appear retroactively across the site.
23 //
24 // Idempotent: re-running enqueues another wave of jobs. The workers
25 // themselves are idempotent thanks to UpsertCommitVerification's ON
26 // CONFLICT clause; the only cost of re-running is the redundant
27 // enqueue + per-commit verify work, not data integrity.
28 //
29 // This is NOT an inline-blocking command — it enqueues and returns.
30 // Watch worker logs (or `SELECT * FROM jobs WHERE kind = 'gpg:backfill'
31 // ORDER BY created_at DESC`) to follow progress.
32 var gpgBackfillAllCmd = &cobra.Command{
33 Use: "gpg-backfill-all",
34 Short: "Enqueue GPG signature backfill for every active repo",
35 Long: `Walks every active repo and enqueues a gpg:backfill worker job per
36 repo. Each job verifies every signed commit on the repo's default
37 branch + every annotated tag, writing commit_verification_cache
38 rows so the Verified badge appears retroactively.
39
40 This is the one-shot operator command. The per-user-key eager
41 backfill (DispatchForKey) is wired into the GPG-key add handler
42 and runs automatically on key upload — you only need to run this
43 on first deploy or after a known mass-key-upload event.`,
44 RunE: func(cmd *cobra.Command, _ []string) error {
45 cfg, err := config.Load(nil)
46 if err != nil {
47 return err
48 }
49 if cfg.DB.URL == "" {
50 return errors.New("gpg-backfill-all: DB not configured")
51 }
52
53 // 5-minute cap on the enqueue step. The enqueue work itself is
54 // fast (one INSERT per repo + one NOTIFY); the cap is a
55 // pessimistic ceiling for sites with millions of repos.
56 ctx, cancel := context.WithTimeout(cmd.Context(), 5*time.Minute)
57 defer cancel()
58
59 pool, err := db.Open(ctx, db.Config{
60 URL: cfg.DB.URL, MaxConns: 2, MinConns: 0,
61 ConnectTimeout: cfg.DB.ConnectTimeout,
62 })
63 if err != nil {
64 return fmt.Errorf("db open: %w", err)
65 }
66 defer pool.Close()
67
68 count, err := sigverify.DispatchAll(ctx, pool)
69 if err != nil {
70 return fmt.Errorf("dispatch: %w", err)
71 }
72
73 _, _ = fmt.Fprintf(cmd.OutOrStdout(),
74 "gpg-backfill-all: enqueued %d job(s); follow progress via worker logs\n",
75 count,
76 )
77 return nil
78 },
79 }
80