| 1 | // SPDX-License-Identifier: AGPL-3.0-or-later |
| 2 | |
| 3 | package main |
| 4 | |
import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/spf13/cobra"

	"github.com/tenseleyFlow/shithub/internal/infra/config"
	"github.com/tenseleyFlow/shithub/internal/infra/storage"
)
| 21 | |
// storageCmd is the parent `storage` command; the concrete health-check and
// repair subcommands are attached to it at package init time.
var storageCmd = &cobra.Command{
	Use:   "storage",
	Short: "Storage health checks and operational helpers",
}
| 26 | |
| 27 | var storageCheckCmd = &cobra.Command{ |
| 28 | Use: "check", |
| 29 | Short: "Verify object-store round-trip and repos-root writability", |
| 30 | Long: `Exits 0 when both: |
| 31 | (a) PUT and GET succeed against the configured S3-compatible object bucket, and |
| 32 | (b) the configured repos_root is writable. |
| 33 | |
| 34 | Production uses DigitalOcean Spaces through that S3-compatible API. When the |
| 35 | S3 block is unconfigured, only (b) is checked. Used in deploy smoke tests and |
| 36 | as a sanity check from the operator's terminal.`, |
| 37 | RunE: func(cmd *cobra.Command, _ []string) error { |
| 38 | cfg, err := config.Load(nil) |
| 39 | if err != nil { |
| 40 | return err |
| 41 | } |
| 42 | ctx, cancel := context.WithTimeout(cmd.Context(), 30*time.Second) |
| 43 | defer cancel() |
| 44 | |
| 45 | var failures []string |
| 46 | |
| 47 | out := cmd.OutOrStdout() |
| 48 | if err := checkReposRoot(cfg.Storage.ReposRoot); err != nil { |
| 49 | failures = append(failures, fmt.Sprintf("repos_root: %v", err)) |
| 50 | } else { |
| 51 | _, _ = fmt.Fprintf(out, "repos_root: ok (%s)\n", cfg.Storage.ReposRoot) |
| 52 | } |
| 53 | |
| 54 | if cfg.Storage.S3.Endpoint == "" { |
| 55 | _, _ = fmt.Fprintln(out, "s3: skipped (storage.s3 not configured)") |
| 56 | } else if err := checkS3(ctx, cfg.Storage.S3); err != nil { |
| 57 | failures = append(failures, fmt.Sprintf("s3: %v", err)) |
| 58 | } else { |
| 59 | _, _ = fmt.Fprintf(out, "s3: ok (endpoint=%s bucket=%s)\n", |
| 60 | cfg.Storage.S3.Endpoint, cfg.Storage.S3.Bucket) |
| 61 | } |
| 62 | |
| 63 | if len(failures) > 0 { |
| 64 | return fmt.Errorf("storage check failed:\n - %s", strings.Join(failures, "\n - ")) |
| 65 | } |
| 66 | return nil |
| 67 | }, |
| 68 | } |
| 69 | |
| 70 | var storageRepairSharedPermsCmd = &cobra.Command{ |
| 71 | Use: "repair-shared-perms", |
| 72 | Short: "Bring existing bare repos to core.sharedRepository=group + g+w mode bits", |
| 73 | Long: `One-time backfill for repos created before SR2 #287 landed. |
| 74 | |
| 75 | Pre-fix, bare repos were created with 'git init --bare' (no |
| 76 | --shared=group), so objects/ wound up 0755. The SSH-git push path |
| 77 | (git-receive-pack runs as the 'git' user, repos owned by 'shithub') |
| 78 | hit "unable to create temporary object directory" because the group |
| 79 | write bit was missing. |
| 80 | |
| 81 | This subcommand walks every <prefix>/<owner>/<name>.git directory |
| 82 | under storage.repos_root and: |
| 83 | - sets core.sharedRepository=group in each repo's config |
| 84 | - chmods every file g+w |
| 85 | - chmods every directory g+w + g+s so future writes inherit group |
| 86 | |
| 87 | Idempotent. Safe to re-run. Reports a per-repo summary.`, |
| 88 | RunE: func(cmd *cobra.Command, _ []string) error { |
| 89 | cfg, err := config.Load(nil) |
| 90 | if err != nil { |
| 91 | return err |
| 92 | } |
| 93 | root := cfg.Storage.ReposRoot |
| 94 | if root == "" { |
| 95 | return errors.New("storage.repos_root not configured") |
| 96 | } |
| 97 | fs, err := storage.NewRepoFS(root) |
| 98 | if err != nil { |
| 99 | return fmt.Errorf("repofs: %w", err) |
| 100 | } |
| 101 | ctx, cancel := context.WithTimeout(cmd.Context(), 5*time.Minute) |
| 102 | defer cancel() |
| 103 | out := cmd.OutOrStdout() |
| 104 | var ok, fail int |
| 105 | // Walk the canonical layout: <root>/<2-letter-prefix>/<owner>/<name>.git |
| 106 | entries, err := os.ReadDir(root) |
| 107 | if err != nil { |
| 108 | return fmt.Errorf("read repos_root: %w", err) |
| 109 | } |
| 110 | for _, prefix := range entries { |
| 111 | if !prefix.IsDir() { |
| 112 | continue |
| 113 | } |
| 114 | ownerEntries, err := os.ReadDir(root + "/" + prefix.Name()) |
| 115 | if err != nil { |
| 116 | continue |
| 117 | } |
| 118 | for _, owner := range ownerEntries { |
| 119 | if !owner.IsDir() { |
| 120 | continue |
| 121 | } |
| 122 | ownerDir := root + "/" + prefix.Name() + "/" + owner.Name() |
| 123 | repos, err := os.ReadDir(ownerDir) |
| 124 | if err != nil { |
| 125 | continue |
| 126 | } |
| 127 | for _, repo := range repos { |
| 128 | if !repo.IsDir() || !strings.HasSuffix(repo.Name(), ".git") { |
| 129 | continue |
| 130 | } |
| 131 | path := ownerDir + "/" + repo.Name() |
| 132 | if err := fs.RepairSharedPerms(ctx, path); err != nil { |
| 133 | _, _ = fmt.Fprintf(out, "FAIL %s: %v\n", path, err) |
| 134 | fail++ |
| 135 | continue |
| 136 | } |
| 137 | _, _ = fmt.Fprintf(out, "ok %s\n", path) |
| 138 | ok++ |
| 139 | } |
| 140 | } |
| 141 | } |
| 142 | _, _ = fmt.Fprintf(out, "\nrepaired %d repo(s); %d failure(s)\n", ok, fail) |
| 143 | if fail > 0 { |
| 144 | return fmt.Errorf("repair-shared-perms: %d failures", fail) |
| 145 | } |
| 146 | return nil |
| 147 | }, |
| 148 | } |
| 149 | |
| 150 | func init() { |
| 151 | storageCmd.AddCommand(storageCheckCmd) |
| 152 | storageCmd.AddCommand(storageRepairSharedPermsCmd) |
| 153 | } |
| 154 | |
| 155 | func checkReposRoot(root string) error { |
| 156 | if root == "" { |
| 157 | return errors.New("not configured") |
| 158 | } |
| 159 | info, err := os.Stat(root) |
| 160 | if err != nil { |
| 161 | return fmt.Errorf("stat: %w", err) |
| 162 | } |
| 163 | if !info.IsDir() { |
| 164 | return fmt.Errorf("%s is not a directory", root) |
| 165 | } |
| 166 | probe, err := os.CreateTemp(root, ".storage-check-") |
| 167 | if err != nil { |
| 168 | return fmt.Errorf("write probe: %w", err) |
| 169 | } |
| 170 | name := probe.Name() |
| 171 | _ = probe.Close() |
| 172 | if err := os.Remove(name); err != nil { |
| 173 | return fmt.Errorf("cleanup probe: %w", err) |
| 174 | } |
| 175 | return nil |
| 176 | } |
| 177 | |
| 178 | func checkS3(ctx context.Context, s config.S3StorageConfig) error { |
| 179 | store, err := storage.NewS3Store(storage.S3Config{ |
| 180 | Endpoint: s.Endpoint, |
| 181 | Region: s.Region, |
| 182 | AccessKeyID: s.AccessKeyID, |
| 183 | SecretAccessKey: s.SecretAccessKey, |
| 184 | Bucket: s.Bucket, |
| 185 | UseSSL: s.UseSSL, |
| 186 | ForcePathStyle: s.ForcePathStyle, |
| 187 | }) |
| 188 | if err != nil { |
| 189 | return fmt.Errorf("client: %w", err) |
| 190 | } |
| 191 | |
| 192 | suffix := make([]byte, 8) |
| 193 | if _, err := rand.Read(suffix); err != nil { |
| 194 | return fmt.Errorf("random: %w", err) |
| 195 | } |
| 196 | key := "_storage-check/" + hex.EncodeToString(suffix) |
| 197 | body := []byte("storage check at " + time.Now().UTC().Format(time.RFC3339Nano)) |
| 198 | |
| 199 | if _, err := store.Put(ctx, key, strings.NewReader(string(body)), storage.PutOpts{ContentType: "text/plain"}); err != nil { |
| 200 | return fmt.Errorf("put: %w", err) |
| 201 | } |
| 202 | defer func() { _ = store.Delete(ctx, key) }() |
| 203 | |
| 204 | rc, _, err := store.Get(ctx, key) |
| 205 | if err != nil { |
| 206 | return fmt.Errorf("get: %w", err) |
| 207 | } |
| 208 | defer func() { _ = rc.Close() }() |
| 209 | got, err := io.ReadAll(rc) |
| 210 | if err != nil { |
| 211 | return fmt.Errorf("read body: %w", err) |
| 212 | } |
| 213 | if string(got) != string(body) { |
| 214 | return errors.New("round-trip body mismatch") |
| 215 | } |
| 216 | return nil |
| 217 | } |
| 218 |