Go · 3906 bytes Raw Blame History
1 // SPDX-License-Identifier: AGPL-3.0-or-later
2
3 package review
4
5 import (
6 "context"
7 "fmt"
8
9 "github.com/jackc/pgx/v5/pgtype"
10
11 pullsdb "github.com/tenseleyFlow/shithub/internal/pulls/sqlc"
12 repogit "github.com/tenseleyFlow/shithub/internal/repos/git"
13 )
14
15 // RemapAllForPR re-anchors every non-draft review comment on a PR
16 // against the new (base, head) snapshot. For each comment we do a
17 // content-aware check:
18 //
19 // 1. Read the line text at (original_commit_sha, file_path,
20 // original_line). This is what the comment was written against.
21 // 2. Read the same line number in the new blob (newHeadOID for
22 // side=right; newBaseOID for side=left).
23 // 3. If the bytes are identical → keep current_position = original_line.
24 // If not → current_position = NULL (outdated). The comment still
25 // renders in the conversation timeline; the Files tab hides it
26 // until the user clicks "Show outdated."
27 //
28 // This is the conservative v1 mapper. Lines that have been re-indented,
29 // shifted by an insertion above, or merely had a comma added all
30 // outdate — that's the right default. The spec calls out
31 // `git blame --porcelain` as a richer mapper for rebase-heavy PRs;
32 // add that when the simple presence check proves too aggressive.
33 //
34 // Idempotent: re-running converges on the same answer.
35 func RemapAllForPR(ctx context.Context, deps Deps, gitDir string, prID int64, newBaseOID, newHeadOID string) error {
36 if newBaseOID == "" || newHeadOID == "" {
37 return nil
38 }
39 q := pullsdb.New()
40 rows, err := q.ListNonDraftCommentsForPositionMap(ctx, deps.Pool, prID)
41 if err != nil {
42 return fmt.Errorf("position map list: %w", err)
43 }
44 if len(rows) == 0 {
45 return nil
46 }
47
48 // Per-(ref, path) blob cache so we read each blob at most once per
49 // PR, regardless of how many comments anchor into it.
50 type blobKey struct {
51 ref string
52 path string
53 }
54 cache := map[blobKey][]byte{}
55 loadBlob := func(ref, path string) []byte {
56 k := blobKey{ref, path}
57 if b, ok := cache[k]; ok {
58 return b
59 }
60 // We don't bother mapping files > 1 MiB; those comments outdate.
61 const maxBytes = 1 << 20
62 blob, err := repogit.ReadBlobBytes(ctx, gitDir, ref, path, maxBytes)
63 if err != nil {
64 cache[k] = nil
65 return nil
66 }
67 cache[k] = blob
68 return blob
69 }
70
71 for _, c := range rows {
72 newRef := newHeadOID
73 if c.Side == pullsdb.PrReviewSideLeft {
74 newRef = newBaseOID
75 }
76 original := loadBlob(c.OriginalCommitSha, c.FilePath)
77 current := loadBlob(newRef, c.FilePath)
78
79 var newPos pgtype.Int4
80 if line, ok := lineAt(original, int(c.OriginalLine)); ok {
81 if cur, ok := lineAt(current, int(c.OriginalLine)); ok && bytesEqual(line, cur) {
82 newPos = pgtype.Int4{Int32: c.OriginalLine, Valid: true}
83 }
84 }
85 if err := q.SetPRReviewCommentCurrentPosition(ctx, deps.Pool, pullsdb.SetPRReviewCommentCurrentPositionParams{
86 ID: c.ID,
87 CurrentPosition: newPos,
88 }); err != nil {
89 return fmt.Errorf("position map update: %w", err)
90 }
91 }
92 return nil
93 }
94
// lineAt returns the bytes of line n (1-indexed) in blob, with the
// trailing newline stripped. The second result is false when blob is
// nil or n falls outside the blob's line range; a final line with no
// terminating newline still counts as a line.
func lineAt(blob []byte, n int) ([]byte, bool) {
	if blob == nil || n < 1 {
		return nil, false
	}
	remaining := n // lines still to skip before the target; 1 == target
	lineStart := 0
	for pos, ch := range blob {
		if ch != '\n' {
			continue
		}
		if remaining == 1 {
			return blob[lineStart:pos], true
		}
		remaining--
		lineStart = pos + 1
	}
	// The target may be an unterminated final line.
	if remaining == 1 && lineStart < len(blob) {
		return blob[lineStart:], true
	}
	return nil, false
}
118
// bytesEqual reports whether a and b hold the same bytes (nil and
// empty compare equal). `bytes.Equal` would do, but pulling the whole
// package in for one call adds noise to the position-map dependency
// graph; the string conversions here are recognized by the compiler
// and compile to an allocation-free comparison.
func bytesEqual(a, b []byte) bool {
	return string(a) == string(b)
}
133