From 50574ad48e8e307f3a631fc9247f1fe437ae19b4 Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 16:14:20 -0700 Subject: [PATCH 01/15] MM-68539: exclude draft PRs from review search results Drafts are not ready for review and shouldn't appear in /github todo, the daily reminder DM, or the SLA overdue digest. Add draft:false to the shared review search query so all three call sites filter drafts consistently. --- server/plugin/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/plugin/utils.go b/server/plugin/utils.go index d7adc8e72..e04fd7932 100644 --- a/server/plugin/utils.go +++ b/server/plugin/utils.go @@ -30,7 +30,7 @@ func getMentionSearchQuery(username string, orgs []string) string { } func getReviewSearchQuery(username string, orgs []string) string { - return buildSearchQuery("is:pr is:open review-requested:%v archived:false %v", username, orgs) + return buildSearchQuery("is:pr is:open draft:false review-requested:%v archived:false %v", username, orgs) } func getYourPrsSearchQuery(username string, orgs []string) string { From 103844451d6cea780754468ae26cf9fddef5c767 Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 16:16:06 -0700 Subject: [PATCH 02/15] MM-68539: bucket the SLA digest into fixed overdue ranges Replace the per-day buckets with seven fixed ranges (1-3 / 4-7 / 8-14 / 15-30 / 31-90 / 91-365 / >1 year) so a digest with many overdue PRs stays scannable instead of producing dozens of single-day headers. Also surface the configured SLA target in the top-level header so readers can see what threshold the buckets are measured against. 
--- server/plugin/sla_digest.go | 70 +++++++++++++++----- server/plugin/sla_digest_test.go | 108 +++++++++++++++++++++++++++++++ 2 files changed, 163 insertions(+), 15 deletions(-) create mode 100644 server/plugin/sla_digest_test.go diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index eae14f275..1d233e965 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -71,7 +71,7 @@ func (p *Plugin) maybePostDailyOverdueSLADigest(ctx context.Context) { return } - msg := buildSLADigestMessage(entries) + msg := buildSLADigestMessage(entries, cfg.ReviewTargetDays) post := &model.Post{ ChannelId: cfg.OverdueReviewsChannelID, UserId: p.BotUserID, @@ -169,28 +169,68 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry return out } -func buildSLADigestMessage(entries []slaDigestEntry) string { - buckets := make(map[int][]string) - for _, e := range entries { - buckets[e.DaysOverdue] = append(buckets[e.DaysOverdue], e.Line) +// slaBuckets enumerates the digest's overdue buckets in display order (most overdue first). +// Each bucket emits a header even if empty buckets in between are skipped. +var slaBuckets = []struct { + label string + min int // inclusive; -1 for open-ended upper bucket + max int // inclusive; -1 for open-ended upper bucket +}{ + {label: "More than 1 year overdue", min: 366, max: -1}, + {label: "91-365 days overdue", min: 91, max: 365}, + {label: "31-90 days overdue", min: 31, max: 90}, + {label: "15-30 days overdue", min: 15, max: 30}, + {label: "8-14 days overdue", min: 8, max: 14}, + {label: "4-7 days overdue", min: 4, max: 7}, + {label: "1-3 days overdue", min: 1, max: 3}, +} + +// slaBucketIndex returns the index into slaBuckets for the given days-overdue value, or -1 if not overdue. 
+func slaBucketIndex(daysOverdue int) int { + if daysOverdue < 1 { + return -1 + } + for i, b := range slaBuckets { + if b.max == -1 { + if daysOverdue >= b.min { + return i + } + continue + } + if daysOverdue >= b.min && daysOverdue <= b.max { + return i + } } + return -1 +} - days := make([]int, 0, len(buckets)) - for d := range buckets { - days = append(days, d) +func buildSLADigestMessage(entries []slaDigestEntry, targetDays int) string { + bucketLines := make([][]string, len(slaBuckets)) + for _, e := range entries { + idx := slaBucketIndex(e.DaysOverdue) + if idx < 0 { + continue + } + bucketLines[idx] = append(bucketLines[idx], e.Line) } - sort.Sort(sort.Reverse(sort.IntSlice(days))) var b strings.Builder - b.WriteString("### Pull request reviews past SLA\n\n") - for _, d := range days { - lines := buckets[d] - sort.Strings(lines) + if targetDays > 0 { unit := "days" - if d == 1 { + if targetDays == 1 { unit = "day" } - fmt.Fprintf(&b, "#### %d %s overdue\n", d, unit) + fmt.Fprintf(&b, "### Pull request reviews past SLA (target: %d %s from most recent review request)\n\n", targetDays, unit) + } else { + b.WriteString("### Pull request reviews past SLA\n\n") + } + for i, bucket := range slaBuckets { + lines := bucketLines[i] + if len(lines) == 0 { + continue + } + sort.Strings(lines) + fmt.Fprintf(&b, "#### %s\n", bucket.label) for _, line := range lines { b.WriteString(line) b.WriteString("\n") diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go new file mode 100644 index 000000000..a9ce7f5fd --- /dev/null +++ b/server/plugin/sla_digest_test.go @@ -0,0 +1,108 @@ +// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. 
+ +package plugin + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSLABucketIndex(t *testing.T) { + cases := []struct { + daysOverdue int + wantLabel string + }{ + {0, ""}, + {-3, ""}, + {1, "1-3 days overdue"}, + {3, "1-3 days overdue"}, + {4, "4-7 days overdue"}, + {7, "4-7 days overdue"}, + {8, "8-14 days overdue"}, + {14, "8-14 days overdue"}, + {15, "15-30 days overdue"}, + {30, "15-30 days overdue"}, + {31, "31-90 days overdue"}, + {90, "31-90 days overdue"}, + {91, "91-365 days overdue"}, + {365, "91-365 days overdue"}, + {366, "More than 1 year overdue"}, + {1000, "More than 1 year overdue"}, + } + + for _, tc := range cases { + idx := slaBucketIndex(tc.daysOverdue) + if tc.wantLabel == "" { + assert.Equal(t, -1, idx, "daysOverdue=%d expected no bucket", tc.daysOverdue) + continue + } + if assert.NotEqual(t, -1, idx, "daysOverdue=%d expected a bucket", tc.daysOverdue) { + assert.Equal(t, tc.wantLabel, slaBuckets[idx].label, "daysOverdue=%d", tc.daysOverdue) + } + } +} + +func TestBuildSLADigestMessage(t *testing.T) { + t.Run("groups entries into the correct buckets in display order", func(t *testing.T) { + entries := []slaDigestEntry{ + {DaysOverdue: 2, Line: "- a"}, + {DaysOverdue: 5, Line: "- b"}, + {DaysOverdue: 12, Line: "- c"}, + {DaysOverdue: 100, Line: "- d"}, + {DaysOverdue: 400, Line: "- e"}, + } + + msg := buildSLADigestMessage(entries, 3) + + assert.True(t, strings.HasPrefix(msg, "### Pull request reviews past SLA (target: 3 days from most recent review request)")) + + // Verify bucket order in message: most overdue first. 
+ idxYear := strings.Index(msg, "#### More than 1 year overdue") + idx91 := strings.Index(msg, "#### 91-365 days overdue") + idx8 := strings.Index(msg, "#### 8-14 days overdue") + idx4 := strings.Index(msg, "#### 4-7 days overdue") + idx1 := strings.Index(msg, "#### 1-3 days overdue") + assert.True(t, idxYear >= 0 && idx91 > idxYear && idx8 > idx91 && idx4 > idx8 && idx1 > idx4, "buckets should appear in most-overdue-first order") + + assert.NotContains(t, msg, "#### 31-90 days overdue", "empty bucket should be omitted") + assert.NotContains(t, msg, "#### 15-30 days overdue", "empty bucket should be omitted") + }) + + t.Run("non-overdue entries are dropped", func(t *testing.T) { + entries := []slaDigestEntry{ + {DaysOverdue: 0, Line: "- skipped"}, + {DaysOverdue: -2, Line: "- skipped-too"}, + {DaysOverdue: 1, Line: "- kept"}, + } + msg := buildSLADigestMessage(entries, 3) + assert.Contains(t, msg, "- kept") + assert.NotContains(t, msg, "- skipped") + assert.NotContains(t, msg, "- skipped-too") + }) + + t.Run("singular target days uses 'day'", func(t *testing.T) { + msg := buildSLADigestMessage([]slaDigestEntry{{DaysOverdue: 1, Line: "- x"}}, 1) + assert.Contains(t, msg, "target: 1 day from") + }) + + t.Run("zero target days falls back to plain header", func(t *testing.T) { + msg := buildSLADigestMessage([]slaDigestEntry{{DaysOverdue: 1, Line: "- x"}}, 0) + assert.True(t, strings.HasPrefix(msg, "### Pull request reviews past SLA\n")) + }) + + t.Run("lines within a bucket are sorted alphabetically", func(t *testing.T) { + entries := []slaDigestEntry{ + {DaysOverdue: 2, Line: "- zeta"}, + {DaysOverdue: 2, Line: "- alpha"}, + {DaysOverdue: 2, Line: "- mu"}, + } + msg := buildSLADigestMessage(entries, 3) + ai := strings.Index(msg, "- alpha") + mi := strings.Index(msg, "- mu") + zi := strings.Index(msg, "- zeta") + assert.True(t, ai >= 0 && mi > ai && zi > mi, "expected alpha < mu < zeta in output") + }) +} From 46d7e123486c574143e0d290f2f2a4aef03a7832 Mon Sep 17 
00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 16:17:19 -0700 Subject: [PATCH 03/15] MM-68539: link PRs from each digest line Render the PR title as a markdown link so readers can click straight through from the channel digest to the pull request. Escape `]` and `\` in titles so JIRA-prefixed names like "[MM-12345] Fix thing" do not terminate the link early. --- server/plugin/sla_digest_test.go | 48 ++++++++++++++++++++++++++++++++ server/plugin/utils.go | 20 ++++++++++++- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go index a9ce7f5fd..47f1a56d3 100644 --- a/server/plugin/sla_digest_test.go +++ b/server/plugin/sla_digest_test.go @@ -10,6 +10,54 @@ import ( "github.com/stretchr/testify/assert" ) +func TestFormatChannelOverdueReviewLine(t *testing.T) { + const baseURL = "https://github.com/" + + t.Run("renders title as a markdown link to the PR", func(t *testing.T) { + line := formatChannelOverdueReviewLine( + "hmhealey", + "Fix race condition", + "https://github.com/mattermost/mattermost/pull/12345", + baseURL, + ) + assert.Equal(t, "- hmhealey - mattermost/mattermost - [Fix race condition](https://github.com/mattermost/mattermost/pull/12345)", line) + }) + + t.Run("escapes closing brackets in the title so the link does not terminate early", func(t *testing.T) { + line := formatChannelOverdueReviewLine( + "hmhealey", + "[MM-12345] Fix [thing]", + "https://github.com/mattermost/mattermost/pull/1", + baseURL, + ) + // Only `]` needs escaping inside a markdown link's display text; `[` is allowed. 
+ assert.Contains(t, line, `[[MM-12345\] Fix [thing\]](https://github.com/mattermost/mattermost/pull/1)`) + }) + + t.Run("falls back to raw URL as the repo display when owner/repo cannot be parsed", func(t *testing.T) { + line := formatChannelOverdueReviewLine( + "hmhealey", + "Title", + "https://example.invalid/something/odd", + baseURL, + ) + assert.Contains(t, line, "https://example.invalid/something/odd - [Title]") + }) + + t.Run("truncates very long titles before linking", func(t *testing.T) { + title := strings.Repeat("a", 250) + line := formatChannelOverdueReviewLine( + "hmhealey", + title, + "https://github.com/mattermost/mattermost/pull/1", + baseURL, + ) + assert.Contains(t, line, "...](https://github.com/mattermost/mattermost/pull/1)") + // Title body inside the link should not exceed 200 'a's followed by the ellipsis. + assert.NotContains(t, line, strings.Repeat("a", 201)) + }) +} + func TestSLABucketIndex(t *testing.T) { cases := []struct { daysOverdue int diff --git a/server/plugin/utils.go b/server/plugin/utils.go index e04fd7932..50c37c77f 100644 --- a/server/plugin/utils.go +++ b/server/plugin/utils.go @@ -379,6 +379,9 @@ func slaCalendarDiffDays(createdAt github.Timestamp, targetDays int, now time.Ti } // formatChannelOverdueReviewLine formats a single line for the overdue-SLA channel digest. +// The title is rendered as a markdown link to htmlURL so readers can click straight to the PR. +// When owner/repo cannot be parsed from htmlURL, the raw URL is used as the repo display so +// nothing renders as " / ". func formatChannelOverdueReviewLine(githubLogin, title, htmlURL, baseURL string) string { owner, repo := parseOwnerAndRepo(htmlURL, baseURL) repoDisplay := fmt.Sprintf("%s/%s", owner, repo) @@ -388,7 +391,22 @@ func formatChannelOverdueReviewLine(githubLogin, title, htmlURL, baseURL string) if len(title) > 200 { title = strings.TrimSpace(title[:200]) + "..." 
} - return fmt.Sprintf("- %s - %s : %s", githubLogin, repoDisplay, title) + titleDisplay := title + if htmlURL != "" { + titleDisplay = fmt.Sprintf("[%s](%s)", escapeMarkdownLinkText(title), htmlURL) + } + return fmt.Sprintf("- %s - %s - %s", githubLogin, repoDisplay, titleDisplay) +} + +// escapeMarkdownLinkText escapes characters that would break a markdown link's display text. +// We only escape `]` and `\` so that titles containing brackets (common in JIRA-prefixed PRs) +// do not terminate the link early. +func escapeMarkdownLinkText(s string) string { + if s == "" { + return s + } + r := strings.NewReplacer(`\`, `\\`, `]`, `\]`) + return r.Replace(s) } // reviewSLAMarkdown returns a Markdown SLA suffix for Mattermost posts and whether the review is overdue. From b80f105ce29bcea8e7f11ce550af9279a06404fd Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 16:19:14 -0700 Subject: [PATCH 04/15] MM-68539: render Mattermost mentions for digest reviewers Map each reviewer's GitHub login to their Mattermost username via the existing getGitHubToUsernameMapping helper so the digest produces real Mattermost @-mentions: - @harrison (hmhealey) - owner/repo - [title](url) Reviewers who haven't connected the GitHub plugin render as "(not connected) - " so the digest still surfaces them without misleading anyone with an @ that won't notify. --- server/plugin/sla_digest.go | 17 ++++++++++++++++- server/plugin/sla_digest_test.go | 16 +++++++++++++--- server/plugin/utils.go | 6 ++++-- 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index 1d233e965..9341d0b9f 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -87,6 +87,20 @@ func (p *Plugin) maybePostDailyOverdueSLADigest(ctx context.Context) { } } +// resolveReviewerDisplayName turns a GitHub login into the digest's reviewer column. 
+// Connected users get an actual Mattermost @-mention so the post notifies them; unconnected +// users render as "(not connected) - " so admins can see who is missing. +func (p *Plugin) resolveReviewerDisplayName(githubLogin string) string { + if githubLogin == "" { + return "" + } + mmUsername := p.getGitHubToUsernameMapping(githubLogin) + if mmUsername != "" { + return fmt.Sprintf("@%s (%s)", mmUsername, githubLogin) + } + return fmt.Sprintf("(not connected) - %s", githubLogin) +} + func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry { config := p.getConfiguration() targetDays := config.ReviewTargetDays @@ -147,6 +161,7 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry continue } + reviewerDisplay := p.resolveReviewerDisplayName(ghInfo.GitHubUsername) for _, pr := range allIssues { slaStart := p.effectiveReviewSLAStart(pr, baseURL, ghInfo.GitHubUsername) diff := slaCalendarDiffDays(slaStart, targetDays, now) @@ -154,7 +169,7 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry continue } daysOverdue := -diff - line := formatChannelOverdueReviewLine(ghInfo.GitHubUsername, pr.GetTitle(), pr.GetHTMLURL(), baseURL) + line := formatChannelOverdueReviewLine(reviewerDisplay, pr.GetTitle(), pr.GetHTMLURL(), baseURL) out = append(out, slaDigestEntry{DaysOverdue: daysOverdue, Line: line}) } diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go index 47f1a56d3..c172304f6 100644 --- a/server/plugin/sla_digest_test.go +++ b/server/plugin/sla_digest_test.go @@ -13,14 +13,24 @@ import ( func TestFormatChannelOverdueReviewLine(t *testing.T) { const baseURL = "https://github.com/" - t.Run("renders title as a markdown link to the PR", func(t *testing.T) { + t.Run("renders connected reviewer with @-mention and bracketed github login", func(t *testing.T) { line := formatChannelOverdueReviewLine( - "hmhealey", + "@harrison (hmhealey)", + "Fix race condition", + 
"https://github.com/mattermost/mattermost/pull/12345", + baseURL, + ) + assert.Equal(t, "- @harrison (hmhealey) - mattermost/mattermost - [Fix race condition](https://github.com/mattermost/mattermost/pull/12345)", line) + }) + + t.Run("renders unconnected reviewer with `(not connected) - login` prefix", func(t *testing.T) { + line := formatChannelOverdueReviewLine( + "(not connected) - hmhealey", "Fix race condition", "https://github.com/mattermost/mattermost/pull/12345", baseURL, ) - assert.Equal(t, "- hmhealey - mattermost/mattermost - [Fix race condition](https://github.com/mattermost/mattermost/pull/12345)", line) + assert.Equal(t, "- (not connected) - hmhealey - mattermost/mattermost - [Fix race condition](https://github.com/mattermost/mattermost/pull/12345)", line) }) t.Run("escapes closing brackets in the title so the link does not terminate early", func(t *testing.T) { diff --git a/server/plugin/utils.go b/server/plugin/utils.go index 50c37c77f..4dc7b277c 100644 --- a/server/plugin/utils.go +++ b/server/plugin/utils.go @@ -379,10 +379,12 @@ func slaCalendarDiffDays(createdAt github.Timestamp, targetDays int, now time.Ti } // formatChannelOverdueReviewLine formats a single line for the overdue-SLA channel digest. +// reviewerDisplay is a pre-resolved name for the reviewer column (e.g. "@harrison (hmhealey)" for +// connected users or "(not connected) - hmhealey" for users without a Mattermost mapping). // The title is rendered as a markdown link to htmlURL so readers can click straight to the PR. // When owner/repo cannot be parsed from htmlURL, the raw URL is used as the repo display so // nothing renders as " / ". 
-func formatChannelOverdueReviewLine(githubLogin, title, htmlURL, baseURL string) string { +func formatChannelOverdueReviewLine(reviewerDisplay, title, htmlURL, baseURL string) string { owner, repo := parseOwnerAndRepo(htmlURL, baseURL) repoDisplay := fmt.Sprintf("%s/%s", owner, repo) if owner == "" || repo == "" { @@ -395,7 +397,7 @@ func formatChannelOverdueReviewLine(githubLogin, title, htmlURL, baseURL string) if htmlURL != "" { titleDisplay = fmt.Sprintf("[%s](%s)", escapeMarkdownLinkText(title), htmlURL) } - return fmt.Sprintf("- %s - %s - %s", githubLogin, repoDisplay, titleDisplay) + return fmt.Sprintf("- %s - %s - %s", reviewerDisplay, repoDisplay, titleDisplay) } // escapeMarkdownLinkText escapes characters that would break a markdown link's display text. From 86ad82556fc59c0e8997813322d0f740180506da Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 16:21:24 -0700 Subject: [PATCH 05/15] MM-68539: backfill SLA start time from PR timeline The digest's days-overdue clock previously used the cached time of the review_requested webhook, falling back to the PR open date when no KV record existed. PRs whose most recent review was requested before the plugin was installed therefore showed nonsense numbers like 86 days overdue when the actual outstanding review was a few days old. When the digest collector encounters a PR with no cached SLA start, it now pages the issue timeline once to find the most recent surviving review_requested event for the reviewer and caches the result under the existing slarr_v1_* key. Subsequent runs are free. Only the digest path uses the new effectiveReviewSLAStartForDigest variant; /github todo and the RHS panel keep using the no-network effectiveReviewSLAStart so user-facing requests stay fast. 
--- server/plugin/review_sla.go | 103 +++++++++++++++++++++++++++++++ server/plugin/review_sla_test.go | 89 ++++++++++++++++++++++++++ server/plugin/sla_digest.go | 2 +- 3 files changed, 193 insertions(+), 1 deletion(-) diff --git a/server/plugin/review_sla.go b/server/plugin/review_sla.go index ee83b39f3..edc58f36d 100644 --- a/server/plugin/review_sla.go +++ b/server/plugin/review_sla.go @@ -4,8 +4,10 @@ package plugin import ( + "context" "crypto/sha256" "encoding/hex" + "sort" "strconv" "strings" "time" @@ -115,6 +117,107 @@ func (p *Plugin) effectiveReviewSLAStart(pr *github.Issue, baseURL, reviewerGitH return pr.GetCreatedAt() } +// findMostRecentReviewRequestTime walks PR timeline events chronologically and returns the +// timestamp of the most recent surviving review_requested event for githubLogin. Returns the +// zero time if the user has no current pending request (e.g. the request was later removed +// without being re-requested). +func findMostRecentReviewRequestTime(events []*github.Timeline, githubLogin string) time.Time { + target := strings.ToLower(strings.TrimSpace(githubLogin)) + if target == "" { + return time.Time{} + } + + // Defensive sort by CreatedAt ascending; GitHub typically returns events in chronological + // order already, but pagination joins are not guaranteed to be ordered across pages. 
+ sorted := make([]*github.Timeline, 0, len(events)) + for _, e := range events { + if e == nil || e.CreatedAt == nil { + continue + } + sorted = append(sorted, e) + } + sort.SliceStable(sorted, func(i, j int) bool { + return sorted[i].CreatedAt.Time.Before(sorted[j].CreatedAt.Time) + }) + + var current time.Time + for _, e := range sorted { + if e.Reviewer == nil { + continue + } + if strings.ToLower(e.Reviewer.GetLogin()) != target { + continue + } + switch e.GetEvent() { + case "review_requested": + current = e.CreatedAt.Time + case "review_request_removed": + current = time.Time{} + } + } + return current +} + +// backfillReviewSLAStartFromTimeline reads the PR timeline for (owner, repo, prNumber) and, if +// it finds a surviving review_requested event for githubLogin, persists the timestamp under +// the same KV key used by recordReviewRequestSLAStart so subsequent digest runs are free. +// Returns the resolved time (zero if none was found) so callers can use it directly. +func (p *Plugin) backfillReviewSLAStartFromTimeline(ctx context.Context, gh *github.Client, owner, repo string, prNumber int, githubLogin string) (time.Time, error) { + if gh == nil || owner == "" || repo == "" || prNumber == 0 || githubLogin == "" { + return time.Time{}, nil + } + + var events []*github.Timeline + opts := &github.ListOptions{PerPage: 100} + for { + page, resp, err := gh.Issues.ListIssueTimeline(ctx, owner, repo, prNumber, opts) + if err != nil { + return time.Time{}, err + } + events = append(events, page...) 
+ if resp == nil || resp.NextPage == 0 { + break + } + opts.Page = resp.NextPage + } + + found := findMostRecentReviewRequestTime(events, githubLogin) + if found.IsZero() { + return time.Time{}, nil + } + + key := reviewSLAStartKey(owner, repo, prNumber, githubLogin) + val := []byte(found.UTC().Format(time.RFC3339Nano)) + if _, err := p.store.Set(key, val); err != nil { + p.client.Log.Warn("Failed to cache backfilled review SLA start time", "key", key, "error", err.Error()) + } + return found.UTC(), nil +} + +// effectiveReviewSLAStartForDigest is the digest's SLA-start resolver. It prefers the cached +// KV record, falls back to a one-time timeline backfill (caching the result), and finally +// falls back to PR open date. Unlike effectiveReviewSLAStart this can make a network call, +// so callers serving user-facing requests (/github todo, RHS) should keep using +// effectiveReviewSLAStart. +func (p *Plugin) effectiveReviewSLAStartForDigest(ctx context.Context, gh *github.Client, pr *github.Issue, baseURL, reviewerGitHubLogin string) github.Timestamp { + owner, repo := issueOwnerRepo(pr, baseURL) + num := pr.GetNumber() + if owner == "" || repo == "" || num == 0 { + return pr.GetCreatedAt() + } + if t := p.getReviewSLAStartTime(owner, repo, num, reviewerGitHubLogin); !t.IsZero() { + return github.Timestamp{Time: t} + } + if gh != nil { + if t, err := p.backfillReviewSLAStartFromTimeline(ctx, gh, owner, repo, num, reviewerGitHubLogin); err != nil { + p.client.Log.Debug("Timeline backfill failed; falling back to PR created_at", "owner", owner, "repo", repo, "pr", num, "reviewer", reviewerGitHubLogin, "error", err.Error()) + } else if !t.IsZero() { + return github.Timestamp{Time: t} + } + } + return pr.GetCreatedAt() +} + // enrichReviewsWithSLAStart sets review_sla_start on LHS review items so the webapp can match server SLA logic. 
func (p *Plugin) enrichReviewsWithSLAStart(reviews []*graphql.GithubPRDetails, reviewerLogin string) { cfg := p.getConfiguration() diff --git a/server/plugin/review_sla_test.go b/server/plugin/review_sla_test.go index 55dec9e08..d5c5c93c4 100644 --- a/server/plugin/review_sla_test.go +++ b/server/plugin/review_sla_test.go @@ -5,7 +5,9 @@ package plugin import ( "testing" + "time" + "github.com/google/go-github/v54/github" "github.com/stretchr/testify/assert" ) @@ -17,3 +19,90 @@ func TestReviewSLAStartKeyStable(t *testing.T) { k3 := reviewSLAStartKey("Mattermost", "mattermost", 99999, "octocat") assert.NotEqual(t, k1, k3) } + +func TestFindMostRecentReviewRequestTime(t *testing.T) { + user := func(login string) *github.User { return &github.User{Login: github.String(login)} } + at := func(s string) *github.Timestamp { + ts, err := time.Parse(time.RFC3339, s) + if err != nil { + t.Fatalf("bad timestamp %q: %v", s, err) + } + return &github.Timestamp{Time: ts} + } + ev := func(name, login, ts string) *github.Timeline { + return &github.Timeline{ + Event: github.String(name), + Reviewer: user(login), + CreatedAt: at(ts), + } + } + + t.Run("no events returns zero", func(t *testing.T) { + got := findMostRecentReviewRequestTime(nil, "octocat") + assert.True(t, got.IsZero()) + }) + + t.Run("single review_requested returns its timestamp", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("login match is case-insensitive", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "OctoCat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("review_request_removed after request invalidates pending", func(t *testing.T) { + events 
:= []*github.Timeline{ + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + ev("review_request_removed", "octocat", "2026-04-21T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.True(t, got.IsZero(), "removed request should leave no pending start") + }) + + t.Run("re-request after remove uses the most recent request", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + ev("review_request_removed", "octocat", "2026-04-21T10:00:00Z"), + ev("review_requested", "octocat", "2026-04-22T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("events for other reviewers are ignored", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "alice", "2026-04-25T10:00:00Z"), + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("out-of-order pages are sorted defensively", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "octocat", "2026-04-22T10:00:00Z"), + ev("review_request_removed", "octocat", "2026-04-21T10:00:00Z"), + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("non-review-request events are ignored", func(t *testing.T) { + events := []*github.Timeline{ + ev("commented", "octocat", "2026-04-25T10:00:00Z"), + ev("labeled", "octocat", "2026-04-26T10:00:00Z"), + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) +} diff --git a/server/plugin/sla_digest.go 
b/server/plugin/sla_digest.go index 9341d0b9f..ca8444cbe 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -163,7 +163,7 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry reviewerDisplay := p.resolveReviewerDisplayName(ghInfo.GitHubUsername) for _, pr := range allIssues { - slaStart := p.effectiveReviewSLAStart(pr, baseURL, ghInfo.GitHubUsername) + slaStart := p.effectiveReviewSLAStartForDigest(ctx, githubClient, pr, baseURL, ghInfo.GitHubUsername) diff := slaCalendarDiffDays(slaStart, targetDays, now) if diff >= 0 { continue From 3082892c242e33b8c62f1d56ffcb1376f30a9c69 Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 16:53:58 -0700 Subject: [PATCH 06/15] MM-68539: scan all org PRs for overdue reviewers The digest used to iterate over each connected Mattermost user and run a separate GitHub search per user. That meant a reviewer was only included when they had personally connected their GitHub account, which is why the first run only listed hmhealey. Replace that loop with a single org-wide GraphQL search per configured organization. The query asks for every open non-draft PR plus its requestedReviewers (users and teams). Team review requests are expanded once per run via the Teams API, with results cached so we don't refetch the same team on every PR. Each (PR, reviewer) pair is deduped before being scored against the SLA target. The org scan needs an authenticated caller, so we pick the first connected Mattermost user as a service user. If no Mattermost user has connected a GitHub account yet, the digest logs a warning and skips posting rather than running with no token. Also cleans up an embedded-field selector in findMostRecentReviewRequestTime that staticcheck flagged after the timeline backfill commit landed. 
--- server/plugin/graphql/digest_query.go | 132 +++++++++++++++++++ server/plugin/review_sla.go | 2 +- server/plugin/sla_digest.go | 174 ++++++++++++++++++-------- 3 files changed, 253 insertions(+), 55 deletions(-) create mode 100644 server/plugin/graphql/digest_query.go diff --git a/server/plugin/graphql/digest_query.go b/server/plugin/graphql/digest_query.go new file mode 100644 index 000000000..67f5392e5 --- /dev/null +++ b/server/plugin/graphql/digest_query.go @@ -0,0 +1,132 @@ +// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package graphql + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/shurcooL/githubv4" +) + +// DigestPR is a single open non-draft PR returned by GetOpenPRsWithRequestedReviewers, +// flattened from the GraphQL search response so callers don't need to know about githubv4. +type DigestPR struct { + Owner string + Repo string + Number int + Title string + URL string + CreatedAt time.Time + RequestedUsers []string + RequestedTeams []DigestTeamRef +} + +// DigestTeamRef identifies a team review request that the caller can expand to member logins. +type DigestTeamRef struct { + Org string // organization login + Slug string // team slug +} + +type digestRequestedReviewer struct { + Type githubv4.String `graphql:"__typename"` + User struct { + Login githubv4.String + } `graphql:"... on User"` + Team struct { + Slug githubv4.String + Organization struct { + Login githubv4.String + } + } `graphql:"... on Team"` +} + +type digestPRSearchNode struct { + PullRequest struct { + Number githubv4.Int + Title githubv4.String + URL githubv4.URI + CreatedAt githubv4.DateTime + Repository struct { + Name githubv4.String + Owner struct { + Login githubv4.String + } + } + ReviewRequests struct { + Nodes []struct { + RequestedReviewer digestRequestedReviewer + } + } `graphql:"reviewRequests(first:100)"` + } `graphql:"... 
on PullRequest"` +} + +var orgOpenPRsQuery struct { + Search struct { + Nodes []digestPRSearchNode + PageInfo struct { + EndCursor githubv4.String + HasNextPage bool + } + } `graphql:"search(first:100, after:$cursor, query:$query, type:ISSUE)"` +} + +// GetOpenPRsWithRequestedReviewers returns every open non-draft PR in org along with the +// users and teams currently requested for review on each one. Pages through the GitHub +// search API at 100 PRs per call. +func (c *Client) GetOpenPRsWithRequestedReviewers(ctx context.Context, org string) ([]DigestPR, error) { + if org == "" { + return nil, errors.New("org is required for org-wide PR search") + } + + query := fmt.Sprintf("is:pr is:open archived:false draft:false org:%s", org) + params := map[string]any{ + "query": githubv4.String(query), + "cursor": (*githubv4.String)(nil), + } + + var out []DigestPR + for { + if err := c.executeQuery(ctx, &orgOpenPRsQuery, params); err != nil { + return nil, errors.Wrapf(err, "org-wide PR search failed for org %q", org) + } + + for _, node := range orgOpenPRsQuery.Search.Nodes { + pr := DigestPR{ + Owner: string(node.PullRequest.Repository.Owner.Login), + Repo: string(node.PullRequest.Repository.Name), + Number: int(node.PullRequest.Number), + Title: string(node.PullRequest.Title), + URL: node.PullRequest.URL.String(), + CreatedAt: node.PullRequest.CreatedAt.Time, + } + for _, rr := range node.PullRequest.ReviewRequests.Nodes { + switch string(rr.RequestedReviewer.Type) { + case "User": + if login := string(rr.RequestedReviewer.User.Login); login != "" { + pr.RequestedUsers = append(pr.RequestedUsers, login) + } + case "Team": + orgLogin := string(rr.RequestedReviewer.Team.Organization.Login) + slug := string(rr.RequestedReviewer.Team.Slug) + if orgLogin == "" { + orgLogin = org + } + if slug != "" { + pr.RequestedTeams = append(pr.RequestedTeams, DigestTeamRef{Org: orgLogin, Slug: slug}) + } + } + } + out = append(out, pr) + } + + if 
!orgOpenPRsQuery.Search.PageInfo.HasNextPage { + break + } + params["cursor"] = githubv4.NewString(orgOpenPRsQuery.Search.PageInfo.EndCursor) + } + return out, nil +} diff --git a/server/plugin/review_sla.go b/server/plugin/review_sla.go index edc58f36d..9a6fa080a 100644 --- a/server/plugin/review_sla.go +++ b/server/plugin/review_sla.go @@ -137,7 +137,7 @@ func findMostRecentReviewRequestTime(events []*github.Timeline, githubLogin stri sorted = append(sorted, e) } sort.SliceStable(sorted, func(i, j int) bool { - return sorted[i].CreatedAt.Time.Before(sorted[j].CreatedAt.Time) + return sorted[i].CreatedAt.Before(sorted[j].CreatedAt.Time) }) var current time.Time diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index ca8444cbe..3b4c608c0 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -7,15 +7,17 @@ import ( "context" "fmt" "sort" + "strconv" "strings" "time" "github.com/google/go-github/v54/github" + "github.com/mattermost/mattermost-plugin-github/server/plugin/graphql" + "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/pluginapi" "github.com/mattermost/mattermost/server/public/pluginapi/cluster" - "golang.org/x/oauth2" ) const ( @@ -101,6 +103,37 @@ func (p *Plugin) resolveReviewerDisplayName(githubLogin string) string { return fmt.Sprintf("(not connected) - %s", githubLogin) } +// pickServiceGitHubUser returns the first connected user the digest can use as a service +// caller for org-wide GitHub queries. Skips users whose token cannot be loaded. Returns nil +// when no connected user is available; the digest cannot run in that case. 
+func (p *Plugin) pickServiceGitHubUser(ctx context.Context) *GitHubUserInfo { + checker := func(key string) (bool, error) { + return strings.HasSuffix(key, githubTokenKey), nil + } + for page := 0; ; page++ { + if ctx.Err() != nil { + return nil + } + keys, err := p.store.ListKeys(page, keysPerPage, pluginapi.WithChecker(checker)) + if err != nil { + p.client.Log.Warn("SLA digest failed to list connected users", "error", err.Error()) + return nil + } + for _, key := range keys { + userID := strings.TrimSuffix(key, githubTokenKey) + ghInfo, apiErr := p.getGitHubUserInfo(userID) + if apiErr != nil || ghInfo == nil { + continue + } + return ghInfo + } + if len(keys) < keysPerPage { + break + } + } + return nil +} + func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry { config := p.getConfiguration() targetDays := config.ReviewTargetDays @@ -108,76 +141,109 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry orgList := config.getOrganizations() now := time.Now() - checker := func(key string) (bool, error) { - return strings.HasSuffix(key, githubTokenKey), nil + if len(orgList) == 0 { + p.client.Log.Warn("SLA digest cannot run without configured organizations (System Console -> Plugins -> GitHub -> GitHub Organizations)") + return nil + } + + serviceUser := p.pickServiceGitHubUser(ctx) + if serviceUser == nil { + p.client.Log.Warn("SLA digest cannot run: no connected GitHub user available to act as the service caller") + return nil + } + + githubClient := p.githubConnectUser(ctx, serviceUser) + graphQLClient := p.graphQLConnect(serviceUser) + + var allPRs []graphql.DigestPR + for _, org := range orgList { + if ctx.Err() != nil { + return nil + } + prs, err := graphQLClient.GetOpenPRsWithRequestedReviewers(ctx, org) + if err != nil { + p.client.Log.Warn("SLA digest org PR fetch failed", "org", org, "error", err.Error()) + continue + } + allPRs = append(allPRs, prs...) 
+ } + + teamCache := make(map[string][]string) + resolveTeam := func(team graphql.DigestTeamRef) []string { + key := team.Org + "/" + team.Slug + if members, ok := teamCache[key]; ok { + return members + } + var members []string + opts := &github.TeamListTeamMembersOptions{ListOptions: github.ListOptions{PerPage: 100}} + for { + page, resp, err := githubClient.Teams.ListTeamMembersBySlug(ctx, team.Org, team.Slug, opts) + if err != nil { + p.client.Log.Debug("SLA digest team expansion failed", "team", key, "error", err.Error()) + break + } + for _, u := range page { + if login := u.GetLogin(); login != "" { + members = append(members, login) + } + } + if resp == nil || resp.NextPage == 0 { + break + } + opts.Page = resp.NextPage + } + teamCache[key] = members + return members } var out []slaDigestEntry + seen := make(map[string]bool) - for page := 0; ; page++ { + for _, pr := range allPRs { if ctx.Err() != nil { return out } - keys, err := p.store.ListKeys(page, keysPerPage, pluginapi.WithChecker(checker)) - if err != nil { - p.client.Log.Warn("Failed to list keys for SLA digest", "error", err.Error()) - break + logins := append([]string(nil), pr.RequestedUsers...) + for _, team := range pr.RequestedTeams { + logins = append(logins, resolveTeam(team)...) + } + if len(logins) == 0 { + continue } - for _, key := range keys { - if ctx.Err() != nil { - return out - } + // Synthetic *github.Issue so we can reuse effectiveReviewSLAStartForDigest, which + // expects the same shape as the per-user search results. 
+ prIssue := &github.Issue{ + Number: github.Int(pr.Number), + Title: github.String(pr.Title), + HTMLURL: github.String(pr.URL), + CreatedAt: &github.Timestamp{Time: pr.CreatedAt}, + Repository: &github.Repository{ + Name: github.String(pr.Repo), + Owner: &github.User{Login: github.String(pr.Owner)}, + }, + } - userID := strings.TrimSuffix(key, githubTokenKey) - ghInfo, apiErr := p.getGitHubUserInfo(userID) - if apiErr != nil || ghInfo == nil { - time.Sleep(delayBetweenUsers) + for _, login := range logins { + if login == "" { continue } - - githubClient := p.githubConnectUser(ctx, ghInfo) - var allIssues []*github.Issue - cErr := p.useGitHubClient(ghInfo, func(gi *GitHubUserInfo, token *oauth2.Token) error { - query := getReviewSearchQuery(gi.GitHubUsername, orgList) - opts := &github.SearchOptions{ListOptions: github.ListOptions{PerPage: 100}} - for { - result, resp, searchErr := githubClient.Search.Issues(ctx, query, opts) - if searchErr != nil { - return searchErr - } - allIssues = append(allIssues, result.Issues...) 
- if resp.NextPage == 0 { - break - } - opts.Page = resp.NextPage - } - return nil - }) - if cErr != nil { - p.client.Log.Debug("SLA digest skipped user review search", "user_id", userID, "error", cErr.Error()) - time.Sleep(delayBetweenUsers) + dedupeKey := pr.Owner + "/" + pr.Repo + "#" + strconv.Itoa(pr.Number) + "@" + strings.ToLower(login) + if seen[dedupeKey] { continue } + seen[dedupeKey] = true - reviewerDisplay := p.resolveReviewerDisplayName(ghInfo.GitHubUsername) - for _, pr := range allIssues { - slaStart := p.effectiveReviewSLAStartForDigest(ctx, githubClient, pr, baseURL, ghInfo.GitHubUsername) - diff := slaCalendarDiffDays(slaStart, targetDays, now) - if diff >= 0 { - continue - } - daysOverdue := -diff - line := formatChannelOverdueReviewLine(reviewerDisplay, pr.GetTitle(), pr.GetHTMLURL(), baseURL) - out = append(out, slaDigestEntry{DaysOverdue: daysOverdue, Line: line}) + slaStart := p.effectiveReviewSLAStartForDigest(ctx, githubClient, prIssue, baseURL, login) + diff := slaCalendarDiffDays(slaStart, targetDays, now) + if diff >= 0 { + continue } - - time.Sleep(delayBetweenUsers) - } - - if len(keys) < keysPerPage { - break + daysOverdue := -diff + reviewerDisplay := p.resolveReviewerDisplayName(login) + line := formatChannelOverdueReviewLine(reviewerDisplay, pr.Title, pr.URL, baseURL) + out = append(out, slaDigestEntry{DaysOverdue: daysOverdue, Line: line}) } } From 8493293c99cc822ca08685220b24d0d967bf230a Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 16:58:59 -0700 Subject: [PATCH 07/15] MM-68539: show review due dates in the RHS panel The "Pull Requests Needing Review" panel showed each PR's open date but not whether the review was actually overdue against the configured SLA, so engineers had no way to spot urgent items at a glance. Render an SLA badge per review item with one of three states: "Overdue by N days" (red), "Due today" (amber), or "Due in N days" (neutral). 
The badge only appears in the REVIEWS panel and only when an admin has set a positive Review Target Days; other RHS panels and unconfigured installs are unchanged. Extract the SLA-start and overdue computation into webapp/src/utils/sla so the existing red-button indicator on the sidebar buttons and the new per-item badge agree on what "overdue" means. The math mirrors the server's calendar-day diff (UTC midnight to UTC midnight) so a PR that is overdue in the digest is overdue in the RHS too. The shared util prefers review_sla_start (which the server already populates from the review_request webhook or timeline backfill) and falls back to created_at, matching the digest's effectiveReviewSLAStart. Also tightens the configuration reducer's typing so review_target_days flows through getSidebarData without needing per-call assertions. --- .../sidebar_buttons/sidebar_buttons.jsx | 33 +------- .../components/sidebar_right/github_items.tsx | 60 +++++++++++++ webapp/src/components/sidebar_right/index.jsx | 3 +- .../sidebar_right/sidebar_right.jsx | 3 + webapp/src/reducers/index.ts | 6 +- webapp/src/selectors.ts | 3 +- webapp/src/types/github_types.ts | 9 +- webapp/src/utils/sla.ts | 84 +++++++++++++++++++ 8 files changed, 164 insertions(+), 37 deletions(-) create mode 100644 webapp/src/utils/sla.ts diff --git a/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx b/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx index 14c24272f..66f01e7c6 100644 --- a/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx +++ b/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx @@ -7,6 +7,7 @@ import PropTypes from 'prop-types'; import {makeStyleFromTheme, changeOpacity} from 'mattermost-redux/utils/theme_utils'; import {RHSStates} from '../../constants'; +import {reviewsHaveOverdue} from '../../utils/sla'; export default class SidebarButtons extends React.PureComponent { static propTypes = { @@ -211,36 +212,6 @@ export default class SidebarButtons extends 
React.PureComponent { } } -function reviewHasOverdue(reviews, targetDays) { - if (!targetDays || !reviews || reviews.length === 0) { - return false; - } - for (const pr of reviews) { - const startIso = pr.review_sla_start || pr.created_at; - if (!startIso) { - continue; - } - const created = new Date(startIso); - if (Number.isNaN(created.getTime())) { - continue; - } - const due = new Date(Date.UTC( - created.getUTCFullYear(), - created.getUTCMonth(), - created.getUTCDate(), - )); - due.setUTCDate(due.getUTCDate() + targetDays); - const today = new Date(); - const todayUTC = Date.UTC(today.getUTCFullYear(), today.getUTCMonth(), today.getUTCDate()); - const dueUTC = Date.UTC(due.getUTCFullYear(), due.getUTCMonth(), due.getUTCDate()); - const diffDays = Math.round((dueUTC - todayUTC) / (24 * 60 * 60 * 1000)); - if (diffDays < 0) { - return true; - } - } - return false; -} - function reviewButtonStyle(base, reviews, targetDays) { if (!targetDays) { return base; @@ -249,7 +220,7 @@ function reviewButtonStyle(base, reviews, targetDays) { if (list.length === 0) { return base; } - if (reviewHasOverdue(list, targetDays)) { + if (reviewsHaveOverdue(list, targetDays)) { return {...base, color: 'var(--dnd-indicator)'}; } return {...base, color: 'var(--online-indicator)'}; diff --git a/webapp/src/components/sidebar_right/github_items.tsx b/webapp/src/components/sidebar_right/github_items.tsx index e98d2bb88..453f1e20a 100644 --- a/webapp/src/components/sidebar_right/github_items.tsx +++ b/webapp/src/components/sidebar_right/github_items.tsx @@ -12,6 +12,7 @@ import {GitPullRequestIcon, IssueOpenedIcon, IconProps, CalendarIcon, PersonIcon import {GithubItemsProps, GithubLabel, GithubItem, Review} from '../../types/github_types'; import {formatTimeSince} from '../../utils/date_utils'; +import {getReviewSLAStatus} from '../../utils/sla'; import CrossIcon from '../../images/icons/cross'; import DotIcon from '../../images/icons/dot'; @@ -37,6 +38,7 @@ const notificationReasons = { 
function GithubItems(props: GithubItemsProps) { const style = getStyle(props.theme); + const showReviewSLA = Boolean(props.showReviewSLA && props.reviewTargetDays && props.reviewTargetDays > 0); return props.items.length > 0 ? props.items.map((item) => { let repoName = ''; @@ -137,6 +139,11 @@ function GithubItems(props: GithubItemsProps) { const reviews = getReviewText(item, style, (item.created_at != null || userName != null || milestone != null)); + let slaBadge: JSX.Element | null = null; + if (showReviewSLA) { + slaBadge = renderReviewSLABadge(item, props.reviewTargetDays || 0, style); + } + // Status images pasted directly from GitHub. Change to our own version when styles are decided. let status: JSX.Element | null = null; if (item.status) { @@ -304,6 +311,7 @@ function GithubItems(props: GithubItemsProps) { {notificationReasons[item.reason as keyof typeof notificationReasons]} ) : null } + {slaBadge} {reviews} {pullRequestDetails} @@ -375,9 +383,61 @@ const getStyle = makeStyleFromTheme((theme) => { prOpenSince: { margin: '5px 0 0 0', }, + slaBadgeRow: { + margin: '6px 0 0 0', + }, + slaBadgeBase: { + display: 'inline-block', + padding: '2px 8px', + borderRadius: '4px', + fontSize: '12px', + fontWeight: 600, + lineHeight: 1.4, + }, + slaBadgeOverdue: { + backgroundColor: changeOpacity(theme.dndIndicator, 0.15), + color: theme.dndIndicator, + }, + slaBadgeDueToday: { + backgroundColor: changeOpacity(theme.awayIndicator, 0.2), + color: theme.awayIndicator, + }, + slaBadgeDueLater: { + backgroundColor: changeOpacity(theme.centerChannelColor, 0.08), + color: changeOpacity(theme.centerChannelColor, 0.75), + }, }; }); +function renderReviewSLABadge(item: GithubItem, targetDays: number, style: any): JSX.Element | null { + const status = getReviewSLAStatus(item, targetDays); + if (!status) { + return null; + } + + let label: string; + let badgeStyle: CSS.Properties; + if (status.daysFromDue < 0) { + const overdueDays = -status.daysFromDue; + label = `Overdue by 
${overdueDays} ${overdueDays === 1 ? 'day' : 'days'}`; + badgeStyle = style.slaBadgeOverdue; + } else if (status.daysFromDue === 0) { + label = 'Due today'; + badgeStyle = style.slaBadgeDueToday; + } else { + label = `Due in ${status.daysFromDue} ${status.daysFromDue === 1 ? 'day' : 'days'}`; + badgeStyle = style.slaBadgeDueLater; + } + + return ( +
+ + {label} + +
+ ); +} + function getGithubLabels(labels: GithubLabel[]) { return labels.map((label) => { return ( diff --git a/webapp/src/components/sidebar_right/index.jsx b/webapp/src/components/sidebar_right/index.jsx index 25717d359..3e62e1c69 100644 --- a/webapp/src/components/sidebar_right/index.jsx +++ b/webapp/src/components/sidebar_right/index.jsx @@ -11,7 +11,7 @@ import {getSidebarData} from 'src/selectors'; import SidebarRight from './sidebar_right.jsx'; function mapStateToProps(state) { - const {username, reviews, yourPrs, yourAssignments, unreads, enterpriseURL, orgs, rhsState} = getSidebarData(state); + const {username, reviews, yourPrs, yourAssignments, unreads, enterpriseURL, orgs, rhsState, reviewTargetDays} = getSidebarData(state); return { username, reviews, @@ -21,6 +21,7 @@ function mapStateToProps(state) { enterpriseURL, orgs, rhsState, + reviewTargetDays, }; } diff --git a/webapp/src/components/sidebar_right/sidebar_right.jsx b/webapp/src/components/sidebar_right/sidebar_right.jsx index a50f4a00d..567527929 100644 --- a/webapp/src/components/sidebar_right/sidebar_right.jsx +++ b/webapp/src/components/sidebar_right/sidebar_right.jsx @@ -73,6 +73,7 @@ export default class SidebarRight extends React.PureComponent { yourPrs: PropTypes.arrayOf(PropTypes.object), yourAssignments: PropTypes.arrayOf(PropTypes.object), rhsState: PropTypes.string, + reviewTargetDays: PropTypes.number, theme: PropTypes.object.isRequired, actions: PropTypes.shape({ getYourPrsDetails: PropTypes.func.isRequired, @@ -167,6 +168,8 @@ export default class SidebarRight extends React.PureComponent { diff --git a/webapp/src/reducers/index.ts b/webapp/src/reducers/index.ts index f253aceb4..6cb4adb22 100644 --- a/webapp/src/reducers/index.ts +++ b/webapp/src/reducers/index.ts @@ -63,15 +63,15 @@ function userSettings(state = { } } -function configuration(state = { +function configuration(state: ConfigurationData = { left_sidebar_enabled: true, review_target_days: 0, -}, action: {type: string, 
data: ConnectedData | ConfigurationData}) { +}, action: {type: string, data: ConnectedData | ConfigurationData}): ConfigurationData { switch (action.type) { case ActionTypes.RECEIVED_CONNECTED: return { ...state, - ...(action.data as ConnectedData).configuration, + ...((action.data as ConnectedData).configuration as Partial), }; case ActionTypes.RECEIVED_CONFIGURATION: return action.data as ConfigurationData; diff --git a/webapp/src/selectors.ts b/webapp/src/selectors.ts index defe70466..2ecd0d3c6 100644 --- a/webapp/src/selectors.ts +++ b/webapp/src/selectors.ts @@ -54,7 +54,7 @@ function mapPrsToDetails(prs: GithubIssueData[], details: PrsDetailsData[]) { export const getSidebarData = createSelector( getPluginState, (pluginState): SidebarData => { - const {username, sidebarContent, reviewDetails, yourPrDetails, organizations, rhsState} = pluginState; + const {username, sidebarContent, reviewDetails, yourPrDetails, organizations, rhsState, configuration: pluginConfig} = pluginState; return { username, reviews: mapPrsToDetails(sidebarContent.reviews || emptyArray, reviewDetails), @@ -63,6 +63,7 @@ export const getSidebarData = createSelector( unreads: sidebarContent.unreads || emptyArray, orgs: organizations, rhsState, + reviewTargetDays: pluginConfig.review_target_days || 0, }; }, ); diff --git a/webapp/src/types/github_types.ts b/webapp/src/types/github_types.ts index e5b90415d..a1800e264 100644 --- a/webapp/src/types/github_types.ts +++ b/webapp/src/types/github_types.ts @@ -56,6 +56,12 @@ export type GithubItem = PrsDetailsData & { export type GithubItemsProps = { items: GithubItem[]; theme: Theme; + + /** When true, render an SLA-status badge (Overdue / Due today / Due in N days) next to each item. */ + showReviewSLA?: boolean; + + /** SLA target in days, used to compute the badge. Falsy disables the badge regardless of showReviewSLA. 
*/ + reviewTargetDays?: number; } export type UserSettingsData = { @@ -171,7 +177,8 @@ export type SidebarData = { yourAssignments: GithubIssueData[], unreads: UnreadsData[] orgs: string[], - rhsState?: string | null + rhsState?: string | null, + reviewTargetDays: number, } export type Organization = { diff --git a/webapp/src/utils/sla.ts b/webapp/src/utils/sla.ts new file mode 100644 index 000000000..0e0fe322b --- /dev/null +++ b/webapp/src/utils/sla.ts @@ -0,0 +1,84 @@ +// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +const MS_PER_DAY = 24 * 60 * 60 * 1000; + +// daysFromDue is negative when overdue, 0 when due today, positive when in the future. +export type ReviewSLAStatus = { + daysFromDue: number; + overdue: boolean; +}; + +/** + * Returns the ISO timestamp the review SLA clock should start from. Prefers + * review_sla_start (the most recent review request the plugin recorded) and + * falls back to created_at. Returns null when neither is set. + */ +export function getReviewSLAStartIso(item: {review_sla_start?: string | null; created_at?: string | null}): string | null { + if (item.review_sla_start) { + return item.review_sla_start; + } + if (item.created_at) { + return item.created_at; + } + return null; +} + +/** + * Computes the SLA status for a review item, or null when no useful answer is + * possible (no target configured, no start date, unparsable date). The "days" + * are calendar days computed against today's UTC date, matching the server's + * digest math. 
+ */ +export function getReviewSLAStatus( + item: {review_sla_start?: string | null; created_at?: string | null}, + targetDays: number, +): ReviewSLAStatus | null { + if (!targetDays || targetDays <= 0) { + return null; + } + + const startIso = getReviewSLAStartIso(item); + if (!startIso) { + return null; + } + + const start = new Date(startIso); + if (Number.isNaN(start.getTime())) { + return null; + } + + const dueUTC = Date.UTC( + start.getUTCFullYear(), + start.getUTCMonth(), + start.getUTCDate() + targetDays, + ); + const today = new Date(); + const todayUTC = Date.UTC(today.getUTCFullYear(), today.getUTCMonth(), today.getUTCDate()); + const daysFromDue = Math.round((dueUTC - todayUTC) / MS_PER_DAY); + + return { + daysFromDue, + overdue: daysFromDue < 0, + }; +} + +/** + * True when at least one review in the list is overdue against the target. + * Used to drive the red "needs review" indicator on the sidebar button. + */ +export function reviewsHaveOverdue( + reviews: Array<{review_sla_start?: string | null; created_at?: string | null}> | null | undefined, + targetDays: number, +): boolean { + if (!targetDays || !reviews || reviews.length === 0) { + return false; + } + for (const pr of reviews) { + const status = getReviewSLAStatus(pr, targetDays); + if (status && status.overdue) { + return true; + } + } + return false; +} From 428f155615154b3ccd438fb975e36e514aa5569a Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 23:11:38 -0700 Subject: [PATCH 08/15] MM-68539: instantiate digest GraphQL query response per call MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit orgOpenPRsQuery was a package-level var that the graphql library decoded into. Concurrent callers — e.g. the daily digest scheduler running alongside an LHS fetch on a different goroutine — could clobber each other's Search.Nodes mid-decode, producing partial or duplicated PR lists. 
Define the type at package scope (so the graphql library can still reflect on its tags) but instantiate locally per call so each caller writes into its own struct. --- server/plugin/graphql/digest_query.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/plugin/graphql/digest_query.go b/server/plugin/graphql/digest_query.go index 67f5392e5..6ab3eb4f0 100644 --- a/server/plugin/graphql/digest_query.go +++ b/server/plugin/graphql/digest_query.go @@ -64,7 +64,11 @@ type digestPRSearchNode struct { } `graphql:"... on PullRequest"` } -var orgOpenPRsQuery struct { +// orgOpenPRsSearchQuery is the response shape for GetOpenPRsWithRequestedReviewers. Defined +// at package scope so the graphql library can reflect on its tags, but instantiated locally +// per call so concurrent callers (e.g. the digest scheduler running alongside an LHS fetch) +// don't share mutable state. +type orgOpenPRsSearchQuery struct { Search struct { Nodes []digestPRSearchNode PageInfo struct { @@ -88,6 +92,7 @@ func (c *Client) GetOpenPRsWithRequestedReviewers(ctx context.Context, org strin "cursor": (*githubv4.String)(nil), } + var orgOpenPRsQuery orgOpenPRsSearchQuery var out []DigestPR for { if err := c.executeQuery(ctx, &orgOpenPRsQuery, params); err != nil { From d2eddafafb783d915675690bb0c9168641ae6e84 Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 23:12:05 -0700 Subject: [PATCH 09/15] MM-68539: decompose SLA digest into testable helpers The digest's collectAllOverdueSLAItems was a 90-line function that forged synthetic *github.Issue values to feed into helpers designed for search-result PRs. Replace that with a small abstraction and pull each concern into its own function that can be tested in isolation. - Introduce prRef as the minimal PR identity SLA code needs (owner, repo, number, created_at). effectiveReviewSLAStart now takes prRef directly; user-facing call sites build one via prRefFromIssue. 
The digest constructs prRef directly from graphql.DigestPR with no forging. - Extract storeReviewSLAStart so the live review_requested webhook and the timeline backfill share one wire-format codepath instead of duplicating the RFC3339Nano encode-and-Set logic. - Extract fetchPRTimeline so timeline events can be cached at the digest level. Previously a PR with N requested reviewers triggered N timeline fetches; now it's one fetch shared across all reviewers on that PR. - Split collectAllOverdueSLAItems into fetchAllOrgOpenPRs, newTeamMemberResolver, gatherReviewersForPR, newDigestSLAStartResolver, and evaluateOverdueForReviewer. - Make pickServiceGitHubUser deterministic by sorting all token-key pages before iterating. Without this, a multi-node cluster could silently pick different service users on different days as KV iteration order shifted, changing which private repos and teams the digest could see. - newDigestSLAStartResolver returns a summarize() callback the caller invokes via defer, so its Info-level "backfill summary" log fires after the resolver has actually been used (rather than at closure-construction time as a defer in the constructor would have done). Counters log as plain ints so structured-log backends can scrape them numerically. The digest skips constructing the resolver entirely when no PRs were fetched, so the summary line doesn't fire on degenerate runs. - Document in the review search query that drafts are excluded but the SLA clock still runs through any draft period: webhook-recorded starts persist regardless of draft state, so a PR review-requested while draft and later un-drafted retains its original SLA start. New tests cover prRefFromIssue, gatherReviewersForPR, pickServiceGitHubUser (deterministic ordering, broken-token skip, no-connected-users), and the summarize() semantics of newDigestSLAStartResolver (no fire at construction, exactly one fire on summarize()). 
--- server/plugin/plugin.go | 2 +- server/plugin/review_sla.go | 105 +++++++------- server/plugin/review_sla_test.go | 29 ++++ server/plugin/sla_digest.go | 228 +++++++++++++++++++++++-------- server/plugin/sla_digest_test.go | 211 ++++++++++++++++++++++++++++ server/plugin/utils.go | 4 + 6 files changed, 462 insertions(+), 117 deletions(-) diff --git a/server/plugin/plugin.go b/server/plugin/plugin.go index 7c2035c02..2c0ff42ad 100644 --- a/server/plugin/plugin.go +++ b/server/plugin/plugin.go @@ -1137,7 +1137,7 @@ func (p *Plugin) GetToDo(ctx context.Context, info *GitHubUserInfo, githubClient for _, pr := range issueResults.Issues { line := strings.TrimSuffix(getToDoDisplayText(baseURL, pr.GetTitle(), pr.GetHTMLURL(), "", nil), "\n") - slaStart := p.effectiveReviewSLAStart(pr, baseURL, info.GitHubUsername) + slaStart := p.effectiveReviewSLAStart(prRefFromIssue(pr, baseURL), info.GitHubUsername) if suffix, _ := reviewSLAMarkdown(slaStart, targetDays, now); suffix != "" { line += suffix } diff --git a/server/plugin/review_sla.go b/server/plugin/review_sla.go index 9a6fa080a..b518aab37 100644 --- a/server/plugin/review_sla.go +++ b/server/plugin/review_sla.go @@ -19,6 +19,29 @@ import ( const slaReviewReqKeyPrefix = "slarr_v1_" +// prRef is the minimal PR identity the SLA code needs: stable enough to derive the KV key, +// rich enough to fall back to PR open time when no review-request record exists. Centralizing +// this avoids forging *github.Issue values whenever a non-search caller (e.g. the digest) +// needs to ask SLA questions. +type prRef struct { + Owner string + Repo string + Number int + CreatedAt github.Timestamp +} + +// prRefFromIssue extracts a prRef from a search-result issue. Owner/repo come from the API +// response when present, otherwise from the HTML URL (mirrors issueOwnerRepo's behavior). 
+func prRefFromIssue(pr *github.Issue, baseURL string) prRef { + owner, repo := issueOwnerRepo(pr, baseURL) + return prRef{ + Owner: owner, + Repo: repo, + Number: pr.GetNumber(), + CreatedAt: pr.GetCreatedAt(), + } +} + // reviewSLAStartKey returns a stable KV key for (repo, PR, requested reviewer login). func reviewSLAStartKey(owner, repo string, prNumber int, githubLogin string) string { normalized := strings.ToLower(strings.TrimSpace(owner)) + "/" + strings.ToLower(strings.TrimSpace(repo)) + @@ -39,9 +62,14 @@ func (p *Plugin) recordReviewRequestSLAStart(event *github.PullRequestEvent, req if owner == "" || repo == "" || num == 0 || requestedGitHubLogin == "" { return } - key := reviewSLAStartKey(owner, repo, num, requestedGitHubLogin) - at := time.Now().UTC() - val := []byte(at.Format(time.RFC3339Nano)) + p.storeReviewSLAStart(owner, repo, num, requestedGitHubLogin, time.Now().UTC()) +} + +// storeReviewSLAStart writes a review-request timestamp to KV under the canonical key. Used by +// both the live webhook path and the digest's timeline backfill so the wire format matches. +func (p *Plugin) storeReviewSLAStart(owner, repo string, prNumber int, githubLogin string, t time.Time) { + key := reviewSLAStartKey(owner, repo, prNumber, githubLogin) + val := []byte(t.UTC().Format(time.RFC3339Nano)) if _, err := p.store.Set(key, val); err != nil { p.client.Log.Warn("Failed to store review SLA start time", "key", key, "error", err.Error()) } @@ -103,18 +131,17 @@ func issueOwnerRepo(pr *github.Issue, baseURL string) (owner, repo string) { return parseOwnerAndRepo(pr.GetHTMLURL(), baseURL) } -// effectiveReviewSLAStart returns the timestamp used for SLA: when we recorded a review_request webhook -// for this reviewer on this PR, else the PR created time. 
-func (p *Plugin) effectiveReviewSLAStart(pr *github.Issue, baseURL, reviewerGitHubLogin string) github.Timestamp { - owner, repo := issueOwnerRepo(pr, baseURL) - num := pr.GetNumber() - if owner == "" || repo == "" || num == 0 { - return pr.GetCreatedAt() +// effectiveReviewSLAStart returns the timestamp used for SLA: when we recorded a review_request +// webhook for this reviewer on this PR, else the PR created time. Read-only — callers serving +// user-facing requests (/github todo, RHS) can call freely without network I/O. +func (p *Plugin) effectiveReviewSLAStart(pr prRef, reviewerGitHubLogin string) github.Timestamp { + if pr.Owner == "" || pr.Repo == "" || pr.Number == 0 { + return pr.CreatedAt } - if t := p.getReviewSLAStartTime(owner, repo, num, reviewerGitHubLogin); !t.IsZero() { + if t := p.getReviewSLAStartTime(pr.Owner, pr.Repo, pr.Number, reviewerGitHubLogin); !t.IsZero() { return github.Timestamp{Time: t} } - return pr.GetCreatedAt() + return pr.CreatedAt } // findMostRecentReviewRequestTime walks PR timeline events chronologically and returns the @@ -158,13 +185,12 @@ func findMostRecentReviewRequestTime(events []*github.Timeline, githubLogin stri return current } -// backfillReviewSLAStartFromTimeline reads the PR timeline for (owner, repo, prNumber) and, if -// it finds a surviving review_requested event for githubLogin, persists the timestamp under -// the same KV key used by recordReviewRequestSLAStart so subsequent digest runs are free. -// Returns the resolved time (zero if none was found) so callers can use it directly. -func (p *Plugin) backfillReviewSLAStartFromTimeline(ctx context.Context, gh *github.Client, owner, repo string, prNumber int, githubLogin string) (time.Time, error) { - if gh == nil || owner == "" || repo == "" || prNumber == 0 || githubLogin == "" { - return time.Time{}, nil +// fetchPRTimeline returns every timeline event for (owner, repo, prNumber), paging until done. 
+// Pulled out of the digest's backfill so it can be cached at a higher level (one fetch per PR +// even when many reviewers on the same PR need backfilling). +func fetchPRTimeline(ctx context.Context, gh *github.Client, owner, repo string, prNumber int) ([]*github.Timeline, error) { + if gh == nil || owner == "" || repo == "" || prNumber == 0 { + return nil, nil } var events []*github.Timeline @@ -172,7 +198,7 @@ func (p *Plugin) backfillReviewSLAStartFromTimeline(ctx context.Context, gh *git for { page, resp, err := gh.Issues.ListIssueTimeline(ctx, owner, repo, prNumber, opts) if err != nil { - return time.Time{}, err + return nil, err } events = append(events, page...) if resp == nil || resp.NextPage == 0 { @@ -180,42 +206,7 @@ func (p *Plugin) backfillReviewSLAStartFromTimeline(ctx context.Context, gh *git } opts.Page = resp.NextPage } - - found := findMostRecentReviewRequestTime(events, githubLogin) - if found.IsZero() { - return time.Time{}, nil - } - - key := reviewSLAStartKey(owner, repo, prNumber, githubLogin) - val := []byte(found.UTC().Format(time.RFC3339Nano)) - if _, err := p.store.Set(key, val); err != nil { - p.client.Log.Warn("Failed to cache backfilled review SLA start time", "key", key, "error", err.Error()) - } - return found.UTC(), nil -} - -// effectiveReviewSLAStartForDigest is the digest's SLA-start resolver. It prefers the cached -// KV record, falls back to a one-time timeline backfill (caching the result), and finally -// falls back to PR open date. Unlike effectiveReviewSLAStart this can make a network call, -// so callers serving user-facing requests (/github todo, RHS) should keep using -// effectiveReviewSLAStart. 
-func (p *Plugin) effectiveReviewSLAStartForDigest(ctx context.Context, gh *github.Client, pr *github.Issue, baseURL, reviewerGitHubLogin string) github.Timestamp { - owner, repo := issueOwnerRepo(pr, baseURL) - num := pr.GetNumber() - if owner == "" || repo == "" || num == 0 { - return pr.GetCreatedAt() - } - if t := p.getReviewSLAStartTime(owner, repo, num, reviewerGitHubLogin); !t.IsZero() { - return github.Timestamp{Time: t} - } - if gh != nil { - if t, err := p.backfillReviewSLAStartFromTimeline(ctx, gh, owner, repo, num, reviewerGitHubLogin); err != nil { - p.client.Log.Debug("Timeline backfill failed; falling back to PR created_at", "owner", owner, "repo", repo, "pr", num, "reviewer", reviewerGitHubLogin, "error", err.Error()) - } else if !t.IsZero() { - return github.Timestamp{Time: t} - } - } - return pr.GetCreatedAt() + return events, nil } // enrichReviewsWithSLAStart sets review_sla_start on LHS review items so the webapp can match server SLA logic. @@ -229,7 +220,7 @@ func (p *Plugin) enrichReviewsWithSLAStart(reviews []*graphql.GithubPRDetails, r if d == nil || d.Issue == nil { continue } - eff := p.effectiveReviewSLAStart(d.Issue, baseURL, reviewerLogin) + eff := p.effectiveReviewSLAStart(prRefFromIssue(d.Issue, baseURL), reviewerLogin) if eff.IsZero() { continue } diff --git a/server/plugin/review_sla_test.go b/server/plugin/review_sla_test.go index d5c5c93c4..d9af0cd4d 100644 --- a/server/plugin/review_sla_test.go +++ b/server/plugin/review_sla_test.go @@ -20,6 +20,35 @@ func TestReviewSLAStartKeyStable(t *testing.T) { assert.NotEqual(t, k1, k3) } +func TestPrRefFromIssue(t *testing.T) { + const baseURL = "https://github.com/" + createdAt := github.Timestamp{Time: time.Date(2026, 4, 20, 10, 0, 0, 0, time.UTC)} + + t.Run("prefers explicit Repository fields", func(t *testing.T) { + pr := &github.Issue{ + Number: github.Int(42), + HTMLURL: github.String("https://github.com/wrong/wrong/pull/42"), + CreatedAt: &createdAt, + Repository: 
&github.Repository{ + Name: github.String("right"), + Owner: &github.User{Login: github.String("owner")}, + }, + } + got := prRefFromIssue(pr, baseURL) + assert.Equal(t, prRef{Owner: "owner", Repo: "right", Number: 42, CreatedAt: createdAt}, got) + }) + + t.Run("falls back to HTMLURL parsing when Repository is missing", func(t *testing.T) { + pr := &github.Issue{ + Number: github.Int(7), + HTMLURL: github.String("https://github.com/mattermost/plugin-github/pull/7"), + CreatedAt: &createdAt, + } + got := prRefFromIssue(pr, baseURL) + assert.Equal(t, prRef{Owner: "mattermost", Repo: "plugin-github", Number: 7, CreatedAt: createdAt}, got) + }) +} + func TestFindMostRecentReviewRequestTime(t *testing.T) { user := func(login string) *github.User { return &github.User{Login: github.String(login)} } at := func(s string) *github.Timestamp { diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index 3b4c608c0..f4270fb42 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -104,12 +104,17 @@ func (p *Plugin) resolveReviewerDisplayName(githubLogin string) string { } // pickServiceGitHubUser returns the first connected user the digest can use as a service -// caller for org-wide GitHub queries. Skips users whose token cannot be loaded. Returns nil -// when no connected user is available; the digest cannot run in that case. +// caller for org-wide GitHub queries. Keys are sorted before iteration so the choice is +// deterministic across runs (and across cluster nodes) — otherwise the digest's visibility +// into private repos/teams could silently shift run-to-run with KV iteration order. +// +// Returns nil when no connected user is available; the digest cannot run in that case. 
func (p *Plugin) pickServiceGitHubUser(ctx context.Context) *GitHubUserInfo { checker := func(key string) (bool, error) { return strings.HasSuffix(key, githubTokenKey), nil } + + var allKeys []string for page := 0; ; page++ { if ctx.Err() != nil { return nil @@ -119,25 +124,30 @@ func (p *Plugin) pickServiceGitHubUser(ctx context.Context) *GitHubUserInfo { p.client.Log.Warn("SLA digest failed to list connected users", "error", err.Error()) return nil } - for _, key := range keys { - userID := strings.TrimSuffix(key, githubTokenKey) - ghInfo, apiErr := p.getGitHubUserInfo(userID) - if apiErr != nil || ghInfo == nil { - continue - } - return ghInfo - } + allKeys = append(allKeys, keys...) if len(keys) < keysPerPage { break } } + sort.Strings(allKeys) + + for _, key := range allKeys { + if ctx.Err() != nil { + return nil + } + userID := strings.TrimSuffix(key, githubTokenKey) + ghInfo, apiErr := p.getGitHubUserInfo(userID) + if apiErr != nil || ghInfo == nil { + continue + } + return ghInfo + } return nil } func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry { config := p.getConfiguration() targetDays := config.ReviewTargetDays - baseURL := config.getBaseURL() orgList := config.getOrganizations() now := time.Now() @@ -155,10 +165,44 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry githubClient := p.githubConnectUser(ctx, serviceUser) graphQLClient := p.graphQLConnect(serviceUser) + allPRs := p.fetchAllOrgOpenPRs(ctx, graphQLClient, orgList) + if len(allPRs) == 0 { + return nil + } + + resolveTeam := newTeamMemberResolver(ctx, githubClient, p.client.Log) + resolveSLAStart, summarizeSLAStart := p.newDigestSLAStartResolver(ctx, githubClient) + defer summarizeSLAStart() + + out := make([]slaDigestEntry, 0, len(allPRs)) + seen := make(map[string]bool) + for _, pr := range allPRs { + if ctx.Err() != nil { + return out + } + ref := prRef{ + Owner: pr.Owner, + Repo: pr.Repo, + Number: pr.Number, + CreatedAt: 
github.Timestamp{Time: pr.CreatedAt}, + } + for _, login := range gatherReviewersForPR(pr, resolveTeam) { + entry := p.evaluateOverdueForReviewer(ref, pr, login, targetDays, now, seen, resolveSLAStart) + if entry != nil { + out = append(out, *entry) + } + } + } + return out +} + +// fetchAllOrgOpenPRs runs the org-wide PR search once per configured org, logging and skipping +// orgs that fail rather than aborting the whole digest. +func (p *Plugin) fetchAllOrgOpenPRs(ctx context.Context, graphQLClient *graphql.Client, orgList []string) []graphql.DigestPR { var allPRs []graphql.DigestPR for _, org := range orgList { if ctx.Err() != nil { - return nil + return allPRs } prs, err := graphQLClient.GetOpenPRsWithRequestedReviewers(ctx, org) if err != nil { @@ -167,11 +211,17 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry } allPRs = append(allPRs, prs...) } + return allPRs +} - teamCache := make(map[string][]string) - resolveTeam := func(team graphql.DigestTeamRef) []string { - key := team.Org + "/" + team.Slug - if members, ok := teamCache[key]; ok { +// newTeamMemberResolver returns a closure that expands org/team references to member logins, +// memoizing the result so the same team is fetched at most once per digest run. Keys are +// lowercased so case differences in the GraphQL response don't fragment the cache. 
+func newTeamMemberResolver(ctx context.Context, githubClient *github.Client, log pluginapi.LogService) func(graphql.DigestTeamRef) []string { + cache := make(map[string][]string) + return func(team graphql.DigestTeamRef) []string { + key := strings.ToLower(team.Org + "/" + team.Slug) + if members, ok := cache[key]; ok { return members } var members []string @@ -179,7 +229,7 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry for { page, resp, err := githubClient.Teams.ListTeamMembersBySlug(ctx, team.Org, team.Slug, opts) if err != nil { - p.client.Log.Debug("SLA digest team expansion failed", "team", key, "error", err.Error()) + log.Debug("SLA digest team expansion failed", "team", key, "error", err.Error()) break } for _, u := range page { @@ -192,62 +242,122 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry } opts.Page = resp.NextPage } - teamCache[key] = members + cache[key] = members return members } +} - var out []slaDigestEntry - seen := make(map[string]bool) +// gatherReviewersForPR flattens a PR's directly-requested users and all members of its +// requested teams into a single login list. Empty logins are dropped at the consumer. +func gatherReviewersForPR(pr graphql.DigestPR, resolveTeam func(graphql.DigestTeamRef) []string) []string { + logins := append([]string(nil), pr.RequestedUsers...) + for _, team := range pr.RequestedTeams { + logins = append(logins, resolveTeam(team)...) + } + return logins +} - for _, pr := range allPRs { - if ctx.Err() != nil { - return out - } +// newDigestSLAStartResolver returns a closure that resolves a reviewer's SLA-start time for +// the digest, with two layers of caching: +// +// 1. The KV record from the live review_requested webhook (free). +// 2. A timeline backfill, fetched at most once per PR per digest run and shared across +// reviewers on the same PR. Resolved times are written back to KV so subsequent runs hit +// the fast path. 
+// +// Falls back to the PR open time when no review_requested event is found, matching the +// read-only effectiveReviewSLAStart contract. +// +// Also returns a summarize() callback the caller must invoke once the resolver is no longer +// in use (typically via defer). It logs cumulative cache hit / timeline-fetch counts at Info +// level so admins can verify the backfill is healthy across runs (see Day-1-vs-steady-state +// patterns in the docs); calling it before the resolver runs would log misleading zeroes, +// which is why this isn't wired up via defer inside this constructor. +func (p *Plugin) newDigestSLAStartResolver(ctx context.Context, gh *github.Client) (resolve func(prRef, string) github.Timestamp, summarize func()) { + type prKey struct { + owner string + repo string + num int + } + timelineCache := make(map[prKey][]*github.Timeline) + timelineFetched := make(map[prKey]bool) + var fetched, hits int - logins := append([]string(nil), pr.RequestedUsers...) - for _, team := range pr.RequestedTeams { - logins = append(logins, resolveTeam(team)...) + resolve = func(pr prRef, login string) github.Timestamp { + if pr.Owner == "" || pr.Repo == "" || pr.Number == 0 { + return pr.CreatedAt } - if len(logins) == 0 { - continue + if t := p.getReviewSLAStartTime(pr.Owner, pr.Repo, pr.Number, login); !t.IsZero() { + hits++ + return github.Timestamp{Time: t} } - - // Synthetic *github.Issue so we can reuse effectiveReviewSLAStartForDigest, which - // expects the same shape as the per-user search results. 
- prIssue := &github.Issue{ - Number: github.Int(pr.Number), - Title: github.String(pr.Title), - HTMLURL: github.String(pr.URL), - CreatedAt: &github.Timestamp{Time: pr.CreatedAt}, - Repository: &github.Repository{ - Name: github.String(pr.Repo), - Owner: &github.User{Login: github.String(pr.Owner)}, - }, + if gh == nil { + return pr.CreatedAt } - for _, login := range logins { - if login == "" { - continue - } - dedupeKey := pr.Owner + "/" + pr.Repo + "#" + strconv.Itoa(pr.Number) + "@" + strings.ToLower(login) - if seen[dedupeKey] { - continue + key := prKey{pr.Owner, pr.Repo, pr.Number} + events, ok := timelineCache[key] + if !ok && !timelineFetched[key] { + var err error + events, err = fetchPRTimeline(ctx, gh, pr.Owner, pr.Repo, pr.Number) + if err != nil { + p.client.Log.Debug("SLA digest timeline fetch failed; falling back to PR created_at", + "owner", pr.Owner, "repo", pr.Repo, "pr", pr.Number, "error", err.Error()) + events = nil } - seen[dedupeKey] = true + timelineCache[key] = events + timelineFetched[key] = true + fetched++ + } - slaStart := p.effectiveReviewSLAStartForDigest(ctx, githubClient, prIssue, baseURL, login) - diff := slaCalendarDiffDays(slaStart, targetDays, now) - if diff >= 0 { - continue - } - daysOverdue := -diff - reviewerDisplay := p.resolveReviewerDisplayName(login) - line := formatChannelOverdueReviewLine(reviewerDisplay, pr.Title, pr.URL, baseURL) - out = append(out, slaDigestEntry{DaysOverdue: daysOverdue, Line: line}) + found := findMostRecentReviewRequestTime(events, login) + if found.IsZero() { + return pr.CreatedAt } + p.storeReviewSLAStart(pr.Owner, pr.Repo, pr.Number, login, found) + return github.Timestamp{Time: found.UTC()} } - return out + summarize = func() { + p.client.Log.Info("SLA digest backfill summary", + "timeline_pages_fetched", fetched, + "kv_hits", hits) + } + + return resolve, summarize +} + +// evaluateOverdueForReviewer returns a digest entry for (pr, login) when the reviewer is past +// SLA, or nil when not 
overdue, already accounted for, or missing identity. seen is mutated +// to record the (pr, login) pair so duplicate review requests across team membership and +// direct user-request don't double-count. +func (p *Plugin) evaluateOverdueForReviewer( + ref prRef, + pr graphql.DigestPR, + login string, + targetDays int, + now time.Time, + seen map[string]bool, + resolveSLAStart func(prRef, string) github.Timestamp, +) *slaDigestEntry { + if login == "" { + return nil + } + dedupeKey := pr.Owner + "/" + pr.Repo + "#" + strconv.Itoa(pr.Number) + "@" + strings.ToLower(login) + if seen[dedupeKey] { + return nil + } + seen[dedupeKey] = true + + slaStart := resolveSLAStart(ref, login) + diff := slaCalendarDiffDays(slaStart, targetDays, now) + if diff >= 0 { + return nil + } + + reviewerDisplay := p.resolveReviewerDisplayName(login) + line := formatChannelOverdueReviewLine(reviewerDisplay, pr.Title, pr.URL, p.getConfiguration().getBaseURL()) + return &slaDigestEntry{DaysOverdue: -diff, Line: line} } // slaBuckets enumerates the digest's overdue buckets in display order (most overdue first). 
diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go index c172304f6..bf69170d6 100644 --- a/server/plugin/sla_digest_test.go +++ b/server/plugin/sla_digest_test.go @@ -4,10 +4,25 @@ package plugin import ( + "context" + "encoding/json" + "errors" "strings" "testing" + "time" + "github.com/golang/mock/gomock" + "github.com/google/go-github/v54/github" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "golang.org/x/oauth2" + + "github.com/mattermost/mattermost/server/public/plugin/plugintest" + "github.com/mattermost/mattermost/server/public/pluginapi" + + "github.com/mattermost/mattermost-plugin-github/server/mocks" + "github.com/mattermost/mattermost-plugin-github/server/plugin/graphql" ) func TestFormatChannelOverdueReviewLine(t *testing.T) { @@ -164,3 +179,199 @@ func TestBuildSLADigestMessage(t *testing.T) { assert.True(t, ai >= 0 && mi > ai && zi > mi, "expected alpha < mu < zeta in output") }) } + +func TestGatherReviewersForPR(t *testing.T) { + t.Run("flattens direct user requests", func(t *testing.T) { + pr := graphql.DigestPR{ + RequestedUsers: []string{"alice", "bob"}, + } + got := gatherReviewersForPR(pr, func(graphql.DigestTeamRef) []string { return nil }) + assert.Equal(t, []string{"alice", "bob"}, got) + }) + + t.Run("expands team references through the resolver", func(t *testing.T) { + pr := graphql.DigestPR{ + RequestedUsers: []string{"alice"}, + RequestedTeams: []graphql.DigestTeamRef{{Org: "mattermost", Slug: "core"}}, + } + resolver := func(team graphql.DigestTeamRef) []string { + if team.Slug == "core" { + return []string{"bob", "carol"} + } + return nil + } + got := gatherReviewersForPR(pr, resolver) + assert.Equal(t, []string{"alice", "bob", "carol"}, got) + }) + + t.Run("does not mutate the underlying RequestedUsers slice", func(t *testing.T) { + original := []string{"alice"} + pr := graphql.DigestPR{ + RequestedUsers: original, + RequestedTeams: 
[]graphql.DigestTeamRef{{Org: "mattermost", Slug: "core"}}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"bob"} } + _ = gatherReviewersForPR(pr, resolver) + assert.Equal(t, []string{"alice"}, original, "team expansion must not append into the caller's slice") + }) + + t.Run("returns empty slice when no reviewers are requested", func(t *testing.T) { + got := gatherReviewersForPR(graphql.DigestPR{}, func(graphql.DigestTeamRef) []string { return nil }) + assert.Empty(t, got) + }) +} + +func TestPickServiceGitHubUser_DeterministicOrdering(t *testing.T) { + // Returns the lexicographically-first connected user even when KV iteration order is + // reversed, so digest output is stable across runs and cluster nodes. + p, mockKvStore, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + mockKvStore.EXPECT().ListKeys(0, keysPerPage, gomock.Any()).Return( + []string{ + "user-z" + githubTokenKey, + "user-a" + githubTokenKey, + "user-m" + githubTokenKey, + }, + nil, + ) + + encryptedToken, err := encrypt([]byte("dummyEncryptKey1"), MockAccessToken) + require.NoError(t, err) + storedInfo := GitHubUserInfo{ + UserID: "user-a", + GitHubUsername: "user-a-gh", + Token: &oauth2.Token{AccessToken: encryptedToken}, + Settings: &UserSettings{}, + } + infoBytes, err := json.Marshal(&storedInfo) + require.NoError(t, err) + + mockKvStore.EXPECT().Get("user-a"+githubTokenKey, gomock.Any()).DoAndReturn( + func(_ string, out any) error { + return json.Unmarshal(infoBytes, out) + }, + ) + + picked := p.pickServiceGitHubUser(context.Background()) + require.NotNil(t, picked, "expected a connected user to be picked") + assert.Equal(t, "user-a", picked.UserID, "expected lexicographically-first user") +} + +func TestPickServiceGitHubUser_SkipsUsersWithBrokenTokens(t *testing.T) { + // A user whose KV record can't be loaded should be skipped, not abort the digest. We + // expect the iterator to fall through to the next user. 
+ p, mockKvStore, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + mockKvStore.EXPECT().ListKeys(0, keysPerPage, gomock.Any()).Return( + []string{"user-a" + githubTokenKey, "user-b" + githubTokenKey}, + nil, + ) + + mockKvStore.EXPECT().Get("user-a"+githubTokenKey, gomock.Any()).Return(errors.New("kv read failed")) + + encryptedToken, err := encrypt([]byte("dummyEncryptKey1"), MockAccessToken) + require.NoError(t, err) + storedInfo := GitHubUserInfo{ + UserID: "user-b", + GitHubUsername: "user-b-gh", + Token: &oauth2.Token{AccessToken: encryptedToken}, + Settings: &UserSettings{}, + } + infoBytes, err := json.Marshal(&storedInfo) + require.NoError(t, err) + mockKvStore.EXPECT().Get("user-b"+githubTokenKey, gomock.Any()).DoAndReturn( + func(_ string, out any) error { + return json.Unmarshal(infoBytes, out) + }, + ) + + picked := p.pickServiceGitHubUser(context.Background()) + require.NotNil(t, picked) + assert.Equal(t, "user-b", picked.UserID) +} + +func TestPickServiceGitHubUser_NoConnectedUsers(t *testing.T) { + p, mockKvStore, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + mockKvStore.EXPECT().ListKeys(0, keysPerPage, gomock.Any()).Return([]string{}, nil) + + assert.Nil(t, p.pickServiceGitHubUser(context.Background())) +} + +func TestNewDigestSLAStartResolver_LogsAccurateCounters(t *testing.T) { + // The summary log must (a) not fire until summarize() is called and (b) reflect the + // resolver's actual usage when it does. Locks in the fix for a prior bug where the log + // was deferred inside the constructor and so always reported zeroes. 
+ p, mockKvStore, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + cachedTime := time.Date(2026, 4, 1, 12, 0, 0, 0, time.UTC) + mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ string, out any) error { + b, ok := out.(*[]byte) + require.True(t, ok, "getReviewSLAStartTime should pass *[]byte") + *b = []byte(cachedTime.Format(time.RFC3339Nano)) + return nil + }, + ).AnyTimes() + + api, ok := p.API.(*plugintest.API) + require.True(t, ok, "expected plugintest.API") + var ( + logCalls int + loggedHits int + loggedFetches int + ) + api.On("LogInfo", + "SLA digest backfill summary", + "timeline_pages_fetched", mock.AnythingOfType("int"), + "kv_hits", mock.AnythingOfType("int"), + ).Run(func(args mock.Arguments) { + logCalls++ + loggedFetches = args.Get(2).(int) + loggedHits = args.Get(4).(int) + }).Maybe() + + resolve, summarize := p.newDigestSLAStartResolver(context.Background(), nil) + require.Equal(t, 0, logCalls, "summary must not fire at construction time") + + got := resolve(prRef{ + Owner: "owner", + Repo: "repo", + Number: 1, + CreatedAt: github.Timestamp{Time: time.Now().UTC()}, + }, "alice") + require.Equal(t, 0, logCalls, "summary must not fire on resolver invocation") + require.True(t, got.UTC().Equal(cachedTime), + "resolver should return the cached SLA start time; got %v want %v", got.UTC(), cachedTime) + + summarize() + assert.Equal(t, 1, logCalls, "summary should fire exactly once when summarize() is called") + assert.Equal(t, 1, loggedHits, "kv_hits should reflect the resolver's actual usage after one cached lookup") + assert.Equal(t, 0, loggedFetches, "no timeline pages fetched on the cached path") +} + +// setupServiceUserPickTest wires up a Plugin instance with a mocked KvStore and a mock +// plugintest.API that swallows the warn-level logs pickServiceGitHubUser emits on failure +// paths. Returns the plugin, the KV mock for setting expectations, and the gomock controller +// for the caller to Finish(). 
+func setupServiceUserPickTest(t *testing.T) (*Plugin, *mocks.MockKvStore, *gomock.Controller) { + t.Helper() + ctrl := gomock.NewController(t) + mockKvStore := mocks.NewMockKvStore(ctrl) + + api := &plugintest.API{} + api.On("LogWarn", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe() + api.On("LogDebug", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe() + + p := NewPlugin() + p.store = mockKvStore + p.SetAPI(api) + p.client = pluginapi.NewClient(api, p.Driver) + p.setConfiguration(&Configuration{EncryptionKey: "dummyEncryptKey1"}) + + return p, mockKvStore, ctrl +} diff --git a/server/plugin/utils.go b/server/plugin/utils.go index 4dc7b277c..04b6bea1e 100644 --- a/server/plugin/utils.go +++ b/server/plugin/utils.go @@ -29,6 +29,10 @@ func getMentionSearchQuery(username string, orgs []string) string { return buildSearchQuery("is:open mentions:%v archived:false %v", username, orgs) } +// getReviewSearchQuery returns the GitHub search query for a user's pending review requests. +// Drafts are excluded from the search results, but the SLA clock still runs through any draft +// period: review_requested webhooks are recorded regardless of draft state, so a PR that was +// review-requested while draft and later un-drafted retains its original SLA start time. func getReviewSearchQuery(username string, orgs []string) string { return buildSearchQuery("is:pr is:open draft:false review-requested:%v archived:false %v", username, orgs) } From be69b1c60c6a97cf263e643b97e7bd6f455a7746 Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 23:19:27 -0700 Subject: [PATCH 10/15] MM-68539: fix gofmt alignment in SLA digest test CI's gofmt check flagged the var-block in TestNewDigestSLAStartResolver_LogsAccurateCounters: the type column was over-indented by one space because each name had been padded by one too many. 
--- server/plugin/sla_digest_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go index bf69170d6..c3c643c44 100644 --- a/server/plugin/sla_digest_test.go +++ b/server/plugin/sla_digest_test.go @@ -321,9 +321,9 @@ func TestNewDigestSLAStartResolver_LogsAccurateCounters(t *testing.T) { api, ok := p.API.(*plugintest.API) require.True(t, ok, "expected plugintest.API") var ( - logCalls int - loggedHits int - loggedFetches int + logCalls int + loggedHits int + loggedFetches int ) api.On("LogInfo", "SLA digest backfill summary", From b085e9ddba3fe4ecc46ad1c1ef8d58702c291079 Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 23:38:10 -0700 Subject: [PATCH 11/15] MM-68539: address CodeRabbit review feedback Four findings, all addressed: 1. (sidebar_buttons.jsx) Treat negative reviewTargetDays as disabled. reviewButtonStyle's `!targetDays` guard let a negative value through and would render the green "online" indicator instead of leaving the button neutral. Match the canonical `!targetDays || targetDays <= 0` guard used by getReviewSLAStatus / reviewsHaveOverdue. 2. (sla_digest.go) Differentiate "digest could not run" from "nothing overdue." collectAllOverdueSLAItems now returns (entries, ok). ok is false when no orgs are configured, no connected service user is available, or every configured org's GraphQL search failed (tracked via a new anyOrgOK return from fetchAllOrgOpenPRs). The caller only advances slaDigestDayKVKey when ok is true, so a transient first-of-day failure no longer skips every retry until tomorrow. 3. (sla_digest.go) Suppress the "SLA digest backfill summary" log when neither timeline_pages_fetched nor kv_hits moved. Prevents a misleading "0,0" line on degenerate runs (e.g. PRs without requested reviewers, or every reviewer hitting the early-return paths in resolve). 4. 
(sla_digest.go / review_sla.go) Preserve team-request provenance through SLA evaluation. gatherReviewersForPR now returns []reviewerRequest with each reviewer's originating teams attached and dedupes case-insensitively on login. The digest resolver, when no user-scoped review_requested timeline event exists for a reviewer, falls back to the earliest still- surviving team-scoped review_requested event via the new findEarliestSurvivingTeamRequestTime helper. This fixes the cold-cache case where a user pending solely because their team was requested would fall all the way back to pr.CreatedAt and overstate days-overdue. The resolved time is written to KV under the user-scoped key so subsequent runs hit the fast path. New tests: - TestGatherReviewersForPR cases for team-only origin, dedup of direct + team into a single entry retaining team origin, multi-team accumulation, and case-insensitive dedup. - TestNewDigestSLAStartResolver_SkipsSummaryLogWhenNoLookups locks in the guarded summarize() behavior. - TestFindEarliestSurvivingTeamRequestTime covers add/remove, multi-team earliest-wins semantics, defensive sort, and case-insensitive slug match. --- server/plugin/review_sla.go | 65 +++++++ server/plugin/review_sla_test.go | 112 ++++++++++++ server/plugin/sla_digest.go | 164 +++++++++++++----- server/plugin/sla_digest_test.go | 87 +++++++++- .../sidebar_buttons/sidebar_buttons.jsx | 5 +- 5 files changed, 379 insertions(+), 54 deletions(-) diff --git a/server/plugin/review_sla.go b/server/plugin/review_sla.go index b518aab37..eaa4e01ea 100644 --- a/server/plugin/review_sla.go +++ b/server/plugin/review_sla.go @@ -185,6 +185,71 @@ func findMostRecentReviewRequestTime(events []*github.Timeline, githubLogin stri return current } +// findEarliestSurvivingTeamRequestTime walks PR timeline events and returns the earliest +// surviving review_requested event time across any of the given teams. 
"Surviving" means +// not later cancelled by a matching review_request_removed event for the same team. Returns +// the zero time when no team has a still-active request (or when teams is empty). +// +// Used as a fallback for reviewers added solely via team membership: their user-scoped +// timeline doesn't contain a review_requested event, so without this they'd fall all the way +// back to the PR's created_at and overstate days-overdue. The earliest still-active team +// request is the right anchor: if a user is in two requested teams, they have been on the +// hook since the first ask, and a later team request doesn't reset that clock. +// +// Match is on team slug only. A PR's timeline lives within a single GitHub org, and team +// slugs are unique within an org, so cross-org slug collision isn't possible here. +func findEarliestSurvivingTeamRequestTime(events []*github.Timeline, teams []graphql.DigestTeamRef) time.Time { + if len(teams) == 0 { + return time.Time{} + } + wantedSlugs := make(map[string]bool, len(teams)) + for _, t := range teams { + slug := strings.ToLower(strings.TrimSpace(t.Slug)) + if slug != "" { + wantedSlugs[slug] = true + } + } + if len(wantedSlugs) == 0 { + return time.Time{} + } + + sorted := make([]*github.Timeline, 0, len(events)) + for _, e := range events { + if e == nil || e.CreatedAt == nil { + continue + } + sorted = append(sorted, e) + } + sort.SliceStable(sorted, func(i, j int) bool { + return sorted[i].CreatedAt.Before(sorted[j].CreatedAt.Time) + }) + + surviving := make(map[string]time.Time) + for _, e := range sorted { + if e.RequestedTeam == nil { + continue + } + slug := strings.ToLower(e.RequestedTeam.GetSlug()) + if !wantedSlugs[slug] { + continue + } + switch e.GetEvent() { + case "review_requested": + surviving[slug] = e.CreatedAt.Time + case "review_request_removed": + delete(surviving, slug) + } + } + + var earliest time.Time + for _, t := range surviving { + if earliest.IsZero() || t.Before(earliest) { + earliest = 
t + } + } + return earliest +} + // fetchPRTimeline returns every timeline event for (owner, repo, prNumber), paging until done. // Pulled out of the digest's backfill so it can be cached at a higher level (one fetch per PR // even when many reviewers on the same PR need backfilling). diff --git a/server/plugin/review_sla_test.go b/server/plugin/review_sla_test.go index d9af0cd4d..562d8c8af 100644 --- a/server/plugin/review_sla_test.go +++ b/server/plugin/review_sla_test.go @@ -9,6 +9,8 @@ import ( "github.com/google/go-github/v54/github" "github.com/stretchr/testify/assert" + + "github.com/mattermost/mattermost-plugin-github/server/plugin/graphql" ) func TestReviewSLAStartKeyStable(t *testing.T) { @@ -135,3 +137,113 @@ func TestFindMostRecentReviewRequestTime(t *testing.T) { assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) }) } + +func TestFindEarliestSurvivingTeamRequestTime(t *testing.T) { + at := func(s string) *github.Timestamp { + ts, err := time.Parse(time.RFC3339, s) + if err != nil { + t.Fatalf("bad timestamp %q: %v", s, err) + } + return &github.Timestamp{Time: ts} + } + teamEv := func(name, slug, ts string) *github.Timeline { + return &github.Timeline{ + Event: github.String(name), + RequestedTeam: &github.Team{Slug: github.String(slug)}, + CreatedAt: at(ts), + } + } + + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + platform := graphql.DigestTeamRef{Org: "mattermost", Slug: "platform"} + + t.Run("empty teams returns zero", func(t *testing.T) { + got := findEarliestSurvivingTeamRequestTime(nil, nil) + assert.True(t, got.IsZero()) + }) + + t.Run("single team request returns its timestamp", func(t *testing.T) { + events := []*github.Timeline{teamEv("review_requested", "core", "2026-04-20T10:00:00Z")} + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("removed team request leaves no surviving anchor", func(t 
*testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "core", "2026-04-20T10:00:00Z"), + teamEv("review_request_removed", "core", "2026-04-21T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.True(t, got.IsZero()) + }) + + t.Run("re-request after remove uses the most recent request for that team", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "core", "2026-04-20T10:00:00Z"), + teamEv("review_request_removed", "core", "2026-04-21T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("two surviving teams return the earlier ask", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "platform", "2026-04-15T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core, platform}) + assert.Equal(t, "2026-04-15T10:00:00Z", got.Format(time.RFC3339), + "reviewer in two requested teams has been on the hook since the EARLIEST surviving ask") + }) + + t.Run("removed team is excluded from the earliest calculation", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "platform", "2026-04-15T10:00:00Z"), + teamEv("review_request_removed", "platform", "2026-04-16T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core, platform}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("events for non-requested teams are ignored", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "other-team", "2026-04-15T10:00:00Z"), + teamEv("review_requested", "core", 
"2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("user-scoped events with nil RequestedTeam are skipped", func(t *testing.T) { + userEv := &github.Timeline{ + Event: github.String("review_requested"), + Reviewer: &github.User{Login: github.String("alice")}, + CreatedAt: at("2026-04-15T10:00:00Z"), + } + events := []*github.Timeline{ + userEv, + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("out-of-order pages are sorted defensively", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + teamEv("review_request_removed", "core", "2026-04-21T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-20T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339), + "defensive sort means the latest event wins after re-request, even if pages arrived out of order") + }) + + t.Run("team slug match is case-insensitive", func(t *testing.T) { + events := []*github.Timeline{teamEv("review_requested", "Core", "2026-04-20T10:00:00Z")} + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{{Slug: "CORE"}}) + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) +} diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index f4270fb42..c11dc8f69 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -65,7 +65,14 @@ func (p *Plugin) maybePostDailyOverdueSLADigest(ctx context.Context) { return } - entries := p.collectAllOverdueSLAItems(ctx) + entries, ok := p.collectAllOverdueSLAItems(ctx) + if !ok { + // 
Distinguishes "digest could not complete a real scan" (config issue, no service user, + // or every configured org's GraphQL fetch failed) from "scan ran and found nothing + // overdue." We deliberately do NOT advance slaDigestDayKVKey here so the 5-minute + // scheduler retries within the same local day instead of skipping until tomorrow. + return + } if len(entries) == 0 { if _, err := p.store.Set(slaDigestDayKVKey, []byte(day)); err != nil { p.client.Log.Warn("Failed to store SLA digest day marker", "error", err.Error()) @@ -145,7 +152,13 @@ func (p *Plugin) pickServiceGitHubUser(ctx context.Context) *GitHubUserInfo { return nil } -func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry { +// collectAllOverdueSLAItems returns the digest's overdue entries along with an "ok" flag the +// caller uses to decide whether to advance the daily marker. ok is false when the digest +// could not complete a real scan (no orgs configured, no connected service user, or every +// configured org's GraphQL fetch failed); the caller should retry on the next scheduler tick +// rather than treat that as "ran successfully and found nothing." A successful scan returns +// ok=true even when entries is empty. 
+func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) ([]slaDigestEntry, bool) { config := p.getConfiguration() targetDays := config.ReviewTargetDays orgList := config.getOrganizations() @@ -153,21 +166,25 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry if len(orgList) == 0 { p.client.Log.Warn("SLA digest cannot run without configured organizations (System Console -> Plugins -> GitHub -> GitHub Organizations)") - return nil + return nil, false } serviceUser := p.pickServiceGitHubUser(ctx) if serviceUser == nil { p.client.Log.Warn("SLA digest cannot run: no connected GitHub user available to act as the service caller") - return nil + return nil, false } githubClient := p.githubConnectUser(ctx, serviceUser) graphQLClient := p.graphQLConnect(serviceUser) - allPRs := p.fetchAllOrgOpenPRs(ctx, graphQLClient, orgList) + allPRs, anyOrgOK := p.fetchAllOrgOpenPRs(ctx, graphQLClient, orgList) + if !anyOrgOK { + p.client.Log.Warn("SLA digest cannot run: no configured organization returned a successful PR search") + return nil, false + } if len(allPRs) == 0 { - return nil + return nil, true } resolveTeam := newTeamMemberResolver(ctx, githubClient, p.client.Log) @@ -178,7 +195,7 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry seen := make(map[string]bool) for _, pr := range allPRs { if ctx.Err() != nil { - return out + return out, true } ref := prRef{ Owner: pr.Owner, @@ -186,32 +203,35 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry Number: pr.Number, CreatedAt: github.Timestamp{Time: pr.CreatedAt}, } - for _, login := range gatherReviewersForPR(pr, resolveTeam) { - entry := p.evaluateOverdueForReviewer(ref, pr, login, targetDays, now, seen, resolveSLAStart) + for _, rr := range gatherReviewersForPR(pr, resolveTeam) { + entry := p.evaluateOverdueForReviewer(ref, pr, rr, targetDays, now, seen, resolveSLAStart) if entry != nil { out = append(out, *entry) } } } - 
return out + return out, true } // fetchAllOrgOpenPRs runs the org-wide PR search once per configured org, logging and skipping -// orgs that fail rather than aborting the whole digest. -func (p *Plugin) fetchAllOrgOpenPRs(ctx context.Context, graphQLClient *graphql.Client, orgList []string) []graphql.DigestPR { - var allPRs []graphql.DigestPR +// orgs that fail rather than aborting the whole digest. Returns the combined PR list and a +// flag that is true iff at least one configured org returned successfully (a zero-PR org is +// still a success). The caller uses anyOK to distinguish "every org failed, retry later" from +// "configured orgs are simply quiet." +func (p *Plugin) fetchAllOrgOpenPRs(ctx context.Context, graphQLClient *graphql.Client, orgList []string) (allPRs []graphql.DigestPR, anyOK bool) { for _, org := range orgList { if ctx.Err() != nil { - return allPRs + return allPRs, anyOK } prs, err := graphQLClient.GetOpenPRsWithRequestedReviewers(ctx, org) if err != nil { p.client.Log.Warn("SLA digest org PR fetch failed", "org", org, "error", err.Error()) continue } + anyOK = true allPRs = append(allPRs, prs...) } - return allPRs + return allPRs, anyOK } // newTeamMemberResolver returns a closure that expands org/team references to member logins, @@ -247,33 +267,74 @@ func newTeamMemberResolver(ctx context.Context, githubClient *github.Client, log } } -// gatherReviewersForPR flattens a PR's directly-requested users and all members of its -// requested teams into a single login list. Empty logins are dropped at the consumer. -func gatherReviewersForPR(pr graphql.DigestPR, resolveTeam func(graphql.DigestTeamRef) []string) []string { - logins := append([]string(nil), pr.RequestedUsers...) +// reviewerRequest preserves a reviewer login alongside the team(s) they were added through, so +// the SLA backfill can fall back to a team-scoped review_requested timeline event when the +// reviewer has no user-scoped request. 
Direct (non-team) requests carry an empty Teams slice. +// +// Each login appears at most once: a user who is both directly requested and a member of one +// or more requested teams aggregates into a single entry whose Teams slice records every team +// they were also part of (used as a tie-break only when the user-scoped lookup is empty). +type reviewerRequest struct { + Login string + Teams []graphql.DigestTeamRef +} + +// gatherReviewersForPR collapses a PR's directly-requested users and all members of its +// requested teams into a deduplicated reviewer set, preserving each reviewer's team origin so +// the SLA backfill can use a team-scoped review_requested event when no user-scoped event is +// available. Logins are matched case-insensitively for deduplication; the first-seen casing is +// preserved on the returned struct. +func gatherReviewersForPR(pr graphql.DigestPR, resolveTeam func(graphql.DigestTeamRef) []string) []reviewerRequest { + byLogin := make(map[string]int) + out := make([]reviewerRequest, 0, len(pr.RequestedUsers)) + add := func(login string, team *graphql.DigestTeamRef) { + if login == "" { + return + } + key := strings.ToLower(login) + idx, ok := byLogin[key] + if !ok { + out = append(out, reviewerRequest{Login: login}) + idx = len(out) - 1 + byLogin[key] = idx + } + if team != nil { + out[idx].Teams = append(out[idx].Teams, *team) + } + } + for _, login := range pr.RequestedUsers { + add(login, nil) + } for _, team := range pr.RequestedTeams { - logins = append(logins, resolveTeam(team)...) + team := team + for _, login := range resolveTeam(team) { + add(login, &team) + } } - return logins + return out } // newDigestSLAStartResolver returns a closure that resolves a reviewer's SLA-start time for -// the digest, with two layers of caching: +// the digest, with three layers of lookup: // // 1. The KV record from the live review_requested webhook (free). -// 2. 
A timeline backfill, fetched at most once per PR per digest run and shared across -// reviewers on the same PR. Resolved times are written back to KV so subsequent runs hit -// the fast path. +// 2. A user-scoped review_requested event from the PR timeline. Timeline pages are fetched +// at most once per PR per digest run and shared across reviewers on the same PR. +// 3. For reviewers added via team membership only, the earliest surviving team-scoped +// review_requested event. This avoids overstating overdue days for users who are pending +// solely because their team was requested. // -// Falls back to the PR open time when no review_requested event is found, matching the -// read-only effectiveReviewSLAStart contract. +// Resolved times (from either timeline path) are written back to KV under the user-scoped +// key so subsequent runs hit the fast path. Falls back to the PR open time when no +// review_requested event is found, matching the read-only effectiveReviewSLAStart contract. // // Also returns a summarize() callback the caller must invoke once the resolver is no longer // in use (typically via defer). It logs cumulative cache hit / timeline-fetch counts at Info // level so admins can verify the backfill is healthy across runs (see Day-1-vs-steady-state -// patterns in the docs); calling it before the resolver runs would log misleading zeroes, -// which is why this isn't wired up via defer inside this constructor. -func (p *Plugin) newDigestSLAStartResolver(ctx context.Context, gh *github.Client) (resolve func(prRef, string) github.Timestamp, summarize func()) { +// patterns in the docs). The callback is a no-op when neither counter moved, so digests that +// reach this point but never actually call the resolver (e.g. PRs with no requested +// reviewers) don't emit a misleading "0,0" line. 
+func (p *Plugin) newDigestSLAStartResolver(ctx context.Context, gh *github.Client) (resolve func(prRef, reviewerRequest) github.Timestamp, summarize func()) { type prKey struct { owner string repo string @@ -283,11 +344,11 @@ func (p *Plugin) newDigestSLAStartResolver(ctx context.Context, gh *github.Clien timelineFetched := make(map[prKey]bool) var fetched, hits int - resolve = func(pr prRef, login string) github.Timestamp { + resolve = func(pr prRef, rr reviewerRequest) github.Timestamp { if pr.Owner == "" || pr.Repo == "" || pr.Number == 0 { return pr.CreatedAt } - if t := p.getReviewSLAStartTime(pr.Owner, pr.Repo, pr.Number, login); !t.IsZero() { + if t := p.getReviewSLAStartTime(pr.Owner, pr.Repo, pr.Number, rr.Login); !t.IsZero() { hits++ return github.Timestamp{Time: t} } @@ -310,15 +371,23 @@ func (p *Plugin) newDigestSLAStartResolver(ctx context.Context, gh *github.Clien fetched++ } - found := findMostRecentReviewRequestTime(events, login) - if found.IsZero() { - return pr.CreatedAt + if found := findMostRecentReviewRequestTime(events, rr.Login); !found.IsZero() { + p.storeReviewSLAStart(pr.Owner, pr.Repo, pr.Number, rr.Login, found) + return github.Timestamp{Time: found.UTC()} } - p.storeReviewSLAStart(pr.Owner, pr.Repo, pr.Number, login, found) - return github.Timestamp{Time: found.UTC()} + // No user-scoped event. For team-membership requests, anchor to the earliest still- + // surviving team request: the user has been on the hook since the first such ask. 
+ if found := findEarliestSurvivingTeamRequestTime(events, rr.Teams); !found.IsZero() { + p.storeReviewSLAStart(pr.Owner, pr.Repo, pr.Number, rr.Login, found) + return github.Timestamp{Time: found.UTC()} + } + return pr.CreatedAt } summarize = func() { + if fetched == 0 && hits == 0 { + return + } p.client.Log.Info("SLA digest backfill summary", "timeline_pages_fetched", fetched, "kv_hits", hits) @@ -327,35 +396,36 @@ func (p *Plugin) newDigestSLAStartResolver(ctx context.Context, gh *github.Clien return resolve, summarize } -// evaluateOverdueForReviewer returns a digest entry for (pr, login) when the reviewer is past -// SLA, or nil when not overdue, already accounted for, or missing identity. seen is mutated -// to record the (pr, login) pair so duplicate review requests across team membership and -// direct user-request don't double-count. +// evaluateOverdueForReviewer returns a digest entry for (pr, reviewer) when the reviewer is +// past SLA, or nil when not overdue, already accounted for, or missing identity. seen is +// mutated to record the (pr, login) pair. gatherReviewersForPR already deduplicates a single +// PR's reviewer set, but seen guards against any unexpected duplicates that slip through +// (e.g. case-mismatched names from different code paths). 
func (p *Plugin) evaluateOverdueForReviewer( ref prRef, pr graphql.DigestPR, - login string, + rr reviewerRequest, targetDays int, now time.Time, seen map[string]bool, - resolveSLAStart func(prRef, string) github.Timestamp, + resolveSLAStart func(prRef, reviewerRequest) github.Timestamp, ) *slaDigestEntry { - if login == "" { + if rr.Login == "" { return nil } - dedupeKey := pr.Owner + "/" + pr.Repo + "#" + strconv.Itoa(pr.Number) + "@" + strings.ToLower(login) + dedupeKey := pr.Owner + "/" + pr.Repo + "#" + strconv.Itoa(pr.Number) + "@" + strings.ToLower(rr.Login) if seen[dedupeKey] { return nil } seen[dedupeKey] = true - slaStart := resolveSLAStart(ref, login) + slaStart := resolveSLAStart(ref, rr) diff := slaCalendarDiffDays(slaStart, targetDays, now) if diff >= 0 { return nil } - reviewerDisplay := p.resolveReviewerDisplayName(login) + reviewerDisplay := p.resolveReviewerDisplayName(rr.Login) line := formatChannelOverdueReviewLine(reviewerDisplay, pr.Title, pr.URL, p.getConfiguration().getBaseURL()) return &slaDigestEntry{DaysOverdue: -diff, Line: line} } diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go index c3c643c44..66ab43ab2 100644 --- a/server/plugin/sla_digest_test.go +++ b/server/plugin/sla_digest_test.go @@ -181,18 +181,30 @@ func TestBuildSLADigestMessage(t *testing.T) { } func TestGatherReviewersForPR(t *testing.T) { - t.Run("flattens direct user requests", func(t *testing.T) { + logins := func(rrs []reviewerRequest) []string { + out := make([]string, 0, len(rrs)) + for _, r := range rrs { + out = append(out, r.Login) + } + return out + } + + t.Run("flattens direct user requests with empty Teams", func(t *testing.T) { pr := graphql.DigestPR{ RequestedUsers: []string{"alice", "bob"}, } got := gatherReviewersForPR(pr, func(graphql.DigestTeamRef) []string { return nil }) - assert.Equal(t, []string{"alice", "bob"}, got) + assert.Equal(t, []string{"alice", "bob"}, logins(got)) + for _, r := range got { + assert.Empty(t, 
r.Teams, "direct request should carry no team origin") + } }) - t.Run("expands team references through the resolver", func(t *testing.T) { + t.Run("expands team references and tags reviewer with the originating team", func(t *testing.T) { + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} pr := graphql.DigestPR{ RequestedUsers: []string{"alice"}, - RequestedTeams: []graphql.DigestTeamRef{{Org: "mattermost", Slug: "core"}}, + RequestedTeams: []graphql.DigestTeamRef{core}, } resolver := func(team graphql.DigestTeamRef) []string { if team.Slug == "core" { @@ -201,7 +213,36 @@ func TestGatherReviewersForPR(t *testing.T) { return nil } got := gatherReviewersForPR(pr, resolver) - assert.Equal(t, []string{"alice", "bob", "carol"}, got) + assert.Equal(t, []string{"alice", "bob", "carol"}, logins(got)) + assert.Empty(t, got[0].Teams, "alice was directly requested, not via team") + assert.Equal(t, []graphql.DigestTeamRef{core}, got[1].Teams) + assert.Equal(t, []graphql.DigestTeamRef{core}, got[2].Teams) + }) + + t.Run("user in both direct list and team list is deduplicated to one entry retaining team origin", func(t *testing.T) { + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + pr := graphql.DigestPR{ + RequestedUsers: []string{"bob"}, + RequestedTeams: []graphql.DigestTeamRef{core}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"bob"} } + got := gatherReviewersForPR(pr, resolver) + require.Len(t, got, 1, "bob should be merged into a single entry") + assert.Equal(t, "bob", got[0].Login) + assert.Equal(t, []graphql.DigestTeamRef{core}, got[0].Teams, + "team origin must survive deduplication so the SLA backfill can use the team request as a fallback") + }) + + t.Run("user requested via two teams accumulates both teams", func(t *testing.T) { + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + platform := graphql.DigestTeamRef{Org: "mattermost", Slug: "platform"} + pr := graphql.DigestPR{ + RequestedTeams: 
[]graphql.DigestTeamRef{core, platform}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"bob"} } + got := gatherReviewersForPR(pr, resolver) + require.Len(t, got, 1) + assert.ElementsMatch(t, []graphql.DigestTeamRef{core, platform}, got[0].Teams) }) t.Run("does not mutate the underlying RequestedUsers slice", func(t *testing.T) { @@ -219,6 +260,18 @@ func TestGatherReviewersForPR(t *testing.T) { got := gatherReviewersForPR(graphql.DigestPR{}, func(graphql.DigestTeamRef) []string { return nil }) assert.Empty(t, got) }) + + t.Run("dedupe is case-insensitive on login", func(t *testing.T) { + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + pr := graphql.DigestPR{ + RequestedUsers: []string{"Bob"}, + RequestedTeams: []graphql.DigestTeamRef{core}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"bob"} } + got := gatherReviewersForPR(pr, resolver) + require.Len(t, got, 1) + assert.Equal(t, "Bob", got[0].Login, "first-seen casing wins") + }) } func TestPickServiceGitHubUser_DeterministicOrdering(t *testing.T) { @@ -343,7 +396,7 @@ func TestNewDigestSLAStartResolver_LogsAccurateCounters(t *testing.T) { Repo: "repo", Number: 1, CreatedAt: github.Timestamp{Time: time.Now().UTC()}, - }, "alice") + }, reviewerRequest{Login: "alice"}) require.Equal(t, 0, logCalls, "summary must not fire on resolver invocation") require.True(t, got.UTC().Equal(cachedTime), "resolver should return the cached SLA start time; got %v want %v", got.UTC(), cachedTime) @@ -354,6 +407,28 @@ func TestNewDigestSLAStartResolver_LogsAccurateCounters(t *testing.T) { assert.Equal(t, 0, loggedFetches, "no timeline pages fetched on the cached path") } +func TestNewDigestSLAStartResolver_SkipsSummaryLogWhenNoLookups(t *testing.T) { + // summarize() is wired up via defer in collectAllOverdueSLAItems, so it can fire even + // when the resolver was never invoked (e.g. 
PRs with no requested reviewers, or + // degenerate prRefs that take the early return inside resolve). When no lookups ran, + // the "0,0" summary line is misleading and is intentionally suppressed. + p, _, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + api, ok := p.API.(*plugintest.API) + require.True(t, ok, "expected plugintest.API") + var logCalls int + api.On("LogInfo", + "SLA digest backfill summary", + "timeline_pages_fetched", mock.AnythingOfType("int"), + "kv_hits", mock.AnythingOfType("int"), + ).Run(func(mock.Arguments) { logCalls++ }).Maybe() + + _, summarize := p.newDigestSLAStartResolver(context.Background(), nil) + summarize() + assert.Equal(t, 0, logCalls, "summary must be suppressed when no lookups ran") +} + // setupServiceUserPickTest wires up a Plugin instance with a mocked KvStore and a mock // plugintest.API that swallows the warn-level logs pickServiceGitHubUser emits on failure // paths. Returns the plugin, the KV mock for setting expectations, and the gomock controller diff --git a/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx b/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx index 66f01e7c6..7ae218778 100644 --- a/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx +++ b/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx @@ -213,7 +213,10 @@ export default class SidebarButtons extends React.PureComponent { } function reviewButtonStyle(base, reviews, targetDays) { - if (!targetDays) { + // Match getReviewSLAStatus / reviewsHaveOverdue: a non-positive target means SLA + // is not configured. !targetDays alone would let a negative value through and + // produce a misleading green indicator. 
+ if (!targetDays || targetDays <= 0) { return base; } const list = reviews || []; From 06c42ca076a0842d852c8159c127dcbe285ca4ce Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 23:44:30 -0700 Subject: [PATCH 12/15] MM-68539: drop redundant loop-variable shadow in gatherReviewersForPR golangci-lint's modernize linter (the rule that runs as part of `make check-style`) flagged `team := team` inside the RequestedTeams loop. Go 1.22+ gives every iteration its own loop variable, so the manual capture is no longer needed. The inner `add` call already dereferences `*team` and copies the value into out[idx].Teams, so removing the shadow does not affect aliasing either way. Caught by `make check-style`; the previous commit was checked with go vet / gofmt / ReadLints / eslint only, which all miss golangci-lint's modernize ruleset. --- server/plugin/sla_digest.go | 1 - 1 file changed, 1 deletion(-) diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index c11dc8f69..5682dae14 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -306,7 +306,6 @@ func gatherReviewersForPR(pr graphql.DigestPR, resolveTeam func(graphql.DigestTe add(login, nil) } for _, team := range pr.RequestedTeams { - team := team for _, login := range resolveTeam(team) { add(login, &team) } From ef45f4e975e378fc1ca3538b7d50d77a86f7ad11 Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Mon, 27 Apr 2026 23:46:41 -0700 Subject: [PATCH 13/15] MM-68539: treat ctx cancellation as 'could not run' in SLA digest CodeRabbit caught that the previous "differentiate could-not-run from nothing-overdue" change still returned ok=true on a context-canceled mid-scan, which would advance slaDigestDayKVKey for an interrupted (partial) walk and suppress retries until the next local day. Two call sites had this hole; both now return (nil, false) on ctx.Err(): 1. 
collectAllOverdueSLAItems' per-PR loop: a partial entry list is not a real scan, so the caller must not record the day marker. 2. fetchAllOrgOpenPRs' per-org loop: even if some orgs already responded successfully, a cancellation interrupted the walk; anyOK must be false so the caller treats it as an interrupted scan rather than a "real but quiet" one. New test TestFetchAllOrgOpenPRs_CanceledContextReturnsNotOK locks in (a) the (nil, false) contract and (b) that the cancellation path fast-fails without dereferencing the GraphQL client. --- server/plugin/sla_digest.go | 15 ++++++++++----- server/plugin/sla_digest_test.go | 20 ++++++++++++++++++++ 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index 5682dae14..40698e6ff 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -195,7 +195,11 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) ([]slaDigestEntr seen := make(map[string]bool) for _, pr := range allPRs { if ctx.Err() != nil { - return out, true + // A canceled context means the scan was interrupted partway through (e.g. + // scheduler shutdown). Whatever we've accumulated is a partial view, so we + // must not let the caller treat it as a real scan and advance the day + // marker. Return ok=false so the next scheduler tick retries. + return nil, false } ref := prRef{ Owner: pr.Owner, @@ -215,13 +219,14 @@ func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) ([]slaDigestEntr // fetchAllOrgOpenPRs runs the org-wide PR search once per configured org, logging and skipping // orgs that fail rather than aborting the whole digest. Returns the combined PR list and a -// flag that is true iff at least one configured org returned successfully (a zero-PR org is -// still a success). The caller uses anyOK to distinguish "every org failed, retry later" from -// "configured orgs are simply quiet." 
+// flag that is true iff every configured org was visited and at least one returned +// successfully (a zero-PR org is still a success). A context cancellation mid-iteration +// forces anyOK to false: the caller uses anyOK to distinguish a "real but quiet scan" from +// an interrupted one, and a partial walk is the latter even if some orgs already responded. func (p *Plugin) fetchAllOrgOpenPRs(ctx context.Context, graphQLClient *graphql.Client, orgList []string) (allPRs []graphql.DigestPR, anyOK bool) { for _, org := range orgList { if ctx.Err() != nil { - return allPRs, anyOK + return nil, false } prs, err := graphQLClient.GetOpenPRsWithRequestedReviewers(ctx, org) if err != nil { diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go index 66ab43ab2..1c9c3b73e 100644 --- a/server/plugin/sla_digest_test.go +++ b/server/plugin/sla_digest_test.go @@ -407,6 +407,26 @@ func TestNewDigestSLAStartResolver_LogsAccurateCounters(t *testing.T) { assert.Equal(t, 0, loggedFetches, "no timeline pages fetched on the cached path") } +func TestFetchAllOrgOpenPRs_CanceledContextReturnsNotOK(t *testing.T) { + // A canceled context mid-iteration must NOT report anyOK=true to the caller, even if + // earlier orgs in the list had already returned successfully (here we cancel before any + // org runs, which is the strict version: anyOK starts false and must stay false). The + // caller uses this to decide whether to advance slaDigestDayKVKey — an interrupted scan + // should retry on the next scheduler tick, not silently skip until tomorrow. + p, _, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + // graphQLClient is intentionally nil: the ctx.Err() check fires before any GraphQL call, + // so a nil client must never be dereferenced on the cancellation path. Locks in the + // fast-fail-without-network behavior alongside the failure-flag contract. 
+ prs, ok := p.fetchAllOrgOpenPRs(ctx, nil, []string{"mattermost"}) + assert.Nil(t, prs, "canceled context should not surface partial results") + assert.False(t, ok, "canceled context must not be reported as a successful scan") +} + func TestNewDigestSLAStartResolver_SkipsSummaryLogWhenNoLookups(t *testing.T) { // summarize() is wired up via defer in collectAllOverdueSLAItems, so it can fire even // when the resolver was never invoked (e.g. PRs with no requested reviewers, or From ff472cf76bf72da7220d2e602aedb57f2d863e66 Mon Sep 17 00:00:00 2001 From: JG Heithcock Date: Tue, 28 Apr 2026 07:55:16 -0700 Subject: [PATCH 14/15] MM-68539: group digest entries by reviewer within each bucket Before: every overdue (reviewer, PR) pair rendered as its own bullet with the reviewer prefix repeated on every row, so a reviewer with five overdue PRs in a single bucket got @-mentioned five times in that bucket and the digest was a wall of duplicated names. After: within each bucket, entries are grouped by reviewer. Each reviewer appears once as the outer bullet (@-mention is also issued once per bucket), with the per-PR bodies as indented sub-bullets. Reviewers within a bucket sort case-insensitively; PR bodies within a reviewer group sort alphabetically. A reviewer who is overdue in multiple buckets still gets one header per bucket they appear in. #### 1-3 days overdue - @alice (alice-gh) - mattermost/mattermost - [Fix encryption rotation](url) - @harrison (hmhealey) - mattermost/mattermost - [Fix race condition](url) - mattermost/plugin-github - [Add SLA digest](url) Implementation: - slaDigestEntry replaces its single Line field with separate ReviewerDisplay and Body fields so the builder can group without having to parse the pre-baked line back apart. - formatChannelOverdueReviewLine becomes formatChannelOverduePRBody: returns just "/ - [title](url)" so callers compose the outer bullet and the reviewer header. 
The same body is used under whichever reviewer header it belongs to. - New groupBucketEntriesByReviewer helper handles the per-bucket collation with deterministic ordering (the digest is posted verbatim, so map iteration order alone would leave the output flaky run-to-run). Tests: - TestFormatChannelOverduePRBody retargets the existing line-shape invariants (escape, truncate, fallback URL) onto the new body helper, and adds an explicit "no leading bullet, no reviewer prefix" assertion locking in the composability contract. - TestBuildSLADigestMessage gains: - "multiple PRs by the same reviewer in one bucket render under a single reviewer header with sorted indented bodies" - "two reviewers in the same bucket render as two separate reviewer groups" - direct unit test on groupBucketEntriesByReviewer for ordering and intra-group body sort. --- server/plugin/sla_digest.go | 62 ++++++++++--- server/plugin/sla_digest_test.go | 149 +++++++++++++++++++++---------- server/plugin/utils.go | 16 ++-- 3 files changed, 160 insertions(+), 67 deletions(-) diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index 40698e6ff..ac781a862 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -26,9 +26,13 @@ const ( slaDigestMutexKey = "github_sla_digest_mutex" ) +// slaDigestEntry is one (reviewer, PR) pair past SLA. Stored as separate fields rather than +// a pre-baked single line so buildSLADigestMessage can group entries by reviewer within each +// bucket and avoid repeating the @-mention on every row. type slaDigestEntry struct { - DaysOverdue int - Line string + DaysOverdue int + ReviewerDisplay string // e.g. "@harrison (hmhealey)" or "(not connected) - hmhealey" + Body string // e.g. 
"mattermost/mattermost - [Fix race condition](url)" } // maybePostDailyOverdueSLADigest posts one aggregated message per local calendar day to the @@ -430,8 +434,8 @@ func (p *Plugin) evaluateOverdueForReviewer( } reviewerDisplay := p.resolveReviewerDisplayName(rr.Login) - line := formatChannelOverdueReviewLine(reviewerDisplay, pr.Title, pr.URL, p.getConfiguration().getBaseURL()) - return &slaDigestEntry{DaysOverdue: -diff, Line: line} + body := formatChannelOverduePRBody(pr.Title, pr.URL, p.getConfiguration().getBaseURL()) + return &slaDigestEntry{DaysOverdue: -diff, ReviewerDisplay: reviewerDisplay, Body: body} } // slaBuckets enumerates the digest's overdue buckets in display order (most overdue first). @@ -469,14 +473,47 @@ func slaBucketIndex(daysOverdue int) int { return -1 } +// reviewerBucketGroup is one reviewer's overdue PRs within a single bucket, used by +// buildSLADigestMessage to render `- <reviewer>\n  - <body>\n  - <body>` instead of +// repeating the reviewer prefix on every PR row. +type reviewerBucketGroup struct { + ReviewerDisplay string + Bodies []string +} + +// groupBucketEntriesByReviewer collapses a bucket's entries into one group per reviewer, +// sorts bodies within each group, and sorts the groups themselves by reviewer display +// (case-insensitive). The deterministic ordering is required for stable channel posts and +// snapshot-style assertions; map iteration order alone would leave the output flaky.
+func groupBucketEntriesByReviewer(entries []slaDigestEntry) []reviewerBucketGroup { + idxByReviewer := make(map[string]int, len(entries)) + out := make([]reviewerBucketGroup, 0, len(entries)) + for _, e := range entries { + idx, ok := idxByReviewer[e.ReviewerDisplay] + if !ok { + out = append(out, reviewerBucketGroup{ReviewerDisplay: e.ReviewerDisplay}) + idx = len(out) - 1 + idxByReviewer[e.ReviewerDisplay] = idx + } + out[idx].Bodies = append(out[idx].Bodies, e.Body) + } + for i := range out { + sort.Strings(out[i].Bodies) + } + sort.SliceStable(out, func(i, j int) bool { + return strings.ToLower(out[i].ReviewerDisplay) < strings.ToLower(out[j].ReviewerDisplay) + }) + return out +} + func buildSLADigestMessage(entries []slaDigestEntry, targetDays int) string { - bucketLines := make([][]string, len(slaBuckets)) + bucketEntries := make([][]slaDigestEntry, len(slaBuckets)) for _, e := range entries { idx := slaBucketIndex(e.DaysOverdue) if idx < 0 { continue } - bucketLines[idx] = append(bucketLines[idx], e.Line) + bucketEntries[idx] = append(bucketEntries[idx], e) } var b strings.Builder @@ -490,15 +527,16 @@ func buildSLADigestMessage(entries []slaDigestEntry, targetDays int) string { b.WriteString("### Pull request reviews past SLA\n\n") } for i, bucket := range slaBuckets { - lines := bucketLines[i] - if len(lines) == 0 { + bes := bucketEntries[i] + if len(bes) == 0 { continue } - sort.Strings(lines) fmt.Fprintf(&b, "#### %s\n", bucket.label) - for _, line := range lines { - b.WriteString(line) - b.WriteString("\n") + for _, g := range groupBucketEntriesByReviewer(bes) { + fmt.Fprintf(&b, "- %s\n", g.ReviewerDisplay) + for _, body := range g.Bodies { + fmt.Fprintf(&b, " - %s\n", body) + } } b.WriteString("\n") } diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go index 1c9c3b73e..27527e094 100644 --- a/server/plugin/sla_digest_test.go +++ b/server/plugin/sla_digest_test.go @@ -25,61 +25,55 @@ import ( 
"github.com/mattermost/mattermost-plugin-github/server/plugin/graphql" ) -func TestFormatChannelOverdueReviewLine(t *testing.T) { +func TestFormatChannelOverduePRBody(t *testing.T) { const baseURL = "https://github.com/" - t.Run("renders connected reviewer with @-mention and bracketed github login", func(t *testing.T) { - line := formatChannelOverdueReviewLine( - "@harrison (hmhealey)", + t.Run("renders owner/repo and a markdown-linked title", func(t *testing.T) { + body := formatChannelOverduePRBody( "Fix race condition", "https://github.com/mattermost/mattermost/pull/12345", baseURL, ) - assert.Equal(t, "- @harrison (hmhealey) - mattermost/mattermost - [Fix race condition](https://github.com/mattermost/mattermost/pull/12345)", line) - }) - - t.Run("renders unconnected reviewer with `(not connected) - login` prefix", func(t *testing.T) { - line := formatChannelOverdueReviewLine( - "(not connected) - hmhealey", - "Fix race condition", - "https://github.com/mattermost/mattermost/pull/12345", - baseURL, - ) - assert.Equal(t, "- (not connected) - hmhealey - mattermost/mattermost - [Fix race condition](https://github.com/mattermost/mattermost/pull/12345)", line) + assert.Equal(t, "mattermost/mattermost - [Fix race condition](https://github.com/mattermost/mattermost/pull/12345)", body) }) t.Run("escapes closing brackets in the title so the link does not terminate early", func(t *testing.T) { - line := formatChannelOverdueReviewLine( - "hmhealey", + body := formatChannelOverduePRBody( "[MM-12345] Fix [thing]", "https://github.com/mattermost/mattermost/pull/1", baseURL, ) // Only `]` needs escaping inside a markdown link's display text; `[` is allowed. 
- assert.Contains(t, line, `[[MM-12345\] Fix [thing\]](https://github.com/mattermost/mattermost/pull/1)`) + assert.Contains(t, body, `[[MM-12345\] Fix [thing\]](https://github.com/mattermost/mattermost/pull/1)`) }) t.Run("falls back to raw URL as the repo display when owner/repo cannot be parsed", func(t *testing.T) { - line := formatChannelOverdueReviewLine( - "hmhealey", + body := formatChannelOverduePRBody( "Title", "https://example.invalid/something/odd", baseURL, ) - assert.Contains(t, line, "https://example.invalid/something/odd - [Title]") + assert.Contains(t, body, "https://example.invalid/something/odd - [Title]") }) t.Run("truncates very long titles before linking", func(t *testing.T) { title := strings.Repeat("a", 250) - line := formatChannelOverdueReviewLine( - "hmhealey", + body := formatChannelOverduePRBody( title, "https://github.com/mattermost/mattermost/pull/1", baseURL, ) - assert.Contains(t, line, "...](https://github.com/mattermost/mattermost/pull/1)") - // Title body inside the link should not exceed 200 'a's followed by the ellipsis. - assert.NotContains(t, line, strings.Repeat("a", 201)) + assert.Contains(t, body, "...](https://github.com/mattermost/mattermost/pull/1)") + // Title text inside the link should not exceed 200 'a's followed by the ellipsis. + assert.NotContains(t, body, strings.Repeat("a", 201)) + }) + + t.Run("does not include a leading bullet or reviewer prefix", func(t *testing.T) { + // Locks in the contract that the per-PR body is composable: the digest builder + // is responsible for the `- ` outer bullet and the reviewer header, not this helper. 
+ body := formatChannelOverduePRBody("Fix", "https://github.com/m/m/pull/1", baseURL) + assert.False(t, strings.HasPrefix(body, "- "), "body should be composable, not pre-bulleted") + assert.False(t, strings.HasPrefix(body, "@"), "body should not carry a reviewer prefix") }) } @@ -119,13 +113,17 @@ func TestSLABucketIndex(t *testing.T) { } func TestBuildSLADigestMessage(t *testing.T) { + entry := func(days int, reviewer, body string) slaDigestEntry { + return slaDigestEntry{DaysOverdue: days, ReviewerDisplay: reviewer, Body: body} + } + t.Run("groups entries into the correct buckets in display order", func(t *testing.T) { entries := []slaDigestEntry{ - {DaysOverdue: 2, Line: "- a"}, - {DaysOverdue: 5, Line: "- b"}, - {DaysOverdue: 12, Line: "- c"}, - {DaysOverdue: 100, Line: "- d"}, - {DaysOverdue: 400, Line: "- e"}, + entry(2, "@a (a-gh)", "owner/repo - [A](url)"), + entry(5, "@b (b-gh)", "owner/repo - [B](url)"), + entry(12, "@c (c-gh)", "owner/repo - [C](url)"), + entry(100, "@d (d-gh)", "owner/repo - [D](url)"), + entry(400, "@e (e-gh)", "owner/repo - [E](url)"), } msg := buildSLADigestMessage(entries, 3) @@ -146,37 +144,94 @@ func TestBuildSLADigestMessage(t *testing.T) { t.Run("non-overdue entries are dropped", func(t *testing.T) { entries := []slaDigestEntry{ - {DaysOverdue: 0, Line: "- skipped"}, - {DaysOverdue: -2, Line: "- skipped-too"}, - {DaysOverdue: 1, Line: "- kept"}, + entry(0, "@skip (skip-gh)", "owner/repo - [skipped](url)"), + entry(-2, "@skip2 (skip2-gh)", "owner/repo - [skipped-too](url)"), + entry(1, "@keep (keep-gh)", "owner/repo - [kept](url)"), } msg := buildSLADigestMessage(entries, 3) - assert.Contains(t, msg, "- kept") - assert.NotContains(t, msg, "- skipped") - assert.NotContains(t, msg, "- skipped-too") + assert.Contains(t, msg, "[kept]") + assert.NotContains(t, msg, "[skipped]") + assert.NotContains(t, msg, "[skipped-too]") }) t.Run("singular target days uses 'day'", func(t *testing.T) { - msg := 
buildSLADigestMessage([]slaDigestEntry{{DaysOverdue: 1, Line: "- x"}}, 1) + msg := buildSLADigestMessage([]slaDigestEntry{entry(1, "@x (x-gh)", "owner/repo - [X](url)")}, 1) assert.Contains(t, msg, "target: 1 day from") }) t.Run("zero target days falls back to plain header", func(t *testing.T) { - msg := buildSLADigestMessage([]slaDigestEntry{{DaysOverdue: 1, Line: "- x"}}, 0) + msg := buildSLADigestMessage([]slaDigestEntry{entry(1, "@x (x-gh)", "owner/repo - [X](url)")}, 0) assert.True(t, strings.HasPrefix(msg, "### Pull request reviews past SLA\n")) }) - t.Run("lines within a bucket are sorted alphabetically", func(t *testing.T) { + t.Run("reviewers within a bucket are sorted alphabetically (case-insensitive)", func(t *testing.T) { + entries := []slaDigestEntry{ + entry(2, "@Zeta (zeta-gh)", "owner/repo - [PR-z](url)"), + entry(2, "@alpha (alpha-gh)", "owner/repo - [PR-a](url)"), + entry(2, "@Mu (mu-gh)", "owner/repo - [PR-m](url)"), + } + msg := buildSLADigestMessage(entries, 3) + ai := strings.Index(msg, "@alpha") + mi := strings.Index(msg, "@Mu") + zi := strings.Index(msg, "@Zeta") + assert.True(t, ai >= 0 && mi > ai && zi > mi, "expected alpha < Mu < Zeta (case-insensitive) in output") + }) + + t.Run("multiple PRs by the same reviewer in one bucket render under a single reviewer header with sorted indented bodies", func(t *testing.T) { + const reviewer = "@harrison (hmhealey)" + entries := []slaDigestEntry{ + entry(2, reviewer, "owner/repo - [zeta-pr](https://example/pr/3)"), + entry(2, reviewer, "owner/repo - [alpha-pr](https://example/pr/1)"), + entry(2, reviewer, "owner/repo - [mu-pr](https://example/pr/2)"), + } + msg := buildSLADigestMessage(entries, 3) + + // The reviewer header must appear EXACTLY once in this bucket — that's the whole + // point of grouping; otherwise the digest still @-spams the reviewer per-PR. 
+ bucketStart := strings.Index(msg, "#### 1-3 days overdue") + require.True(t, bucketStart >= 0, "expected the 1-3 days bucket header") + bucketSlice := msg[bucketStart:] + assert.Equal(t, 1, strings.Count(bucketSlice, "- "+reviewer+"\n"), + "reviewer should appear once per bucket as the outer bullet") + + // Bodies should be indented two spaces and sorted alphabetically within the group. + ai := strings.Index(bucketSlice, " - owner/repo - [alpha-pr]") + mi := strings.Index(bucketSlice, " - owner/repo - [mu-pr]") + zi := strings.Index(bucketSlice, " - owner/repo - [zeta-pr]") + assert.True(t, ai > 0 && mi > ai && zi > mi, + "PR bodies should be indented (` - `) and sorted alphabetically under the reviewer header") + }) + + t.Run("two reviewers in the same bucket render as two separate reviewer groups", func(t *testing.T) { entries := []slaDigestEntry{ - {DaysOverdue: 2, Line: "- zeta"}, - {DaysOverdue: 2, Line: "- alpha"}, - {DaysOverdue: 2, Line: "- mu"}, + entry(2, "@alice (alice-gh)", "o/r - [a-pr](url)"), + entry(2, "@alice (alice-gh)", "o/r - [b-pr](url)"), + entry(2, "@bob (bob-gh)", "o/r - [c-pr](url)"), } msg := buildSLADigestMessage(entries, 3) - ai := strings.Index(msg, "- alpha") - mi := strings.Index(msg, "- mu") - zi := strings.Index(msg, "- zeta") - assert.True(t, ai >= 0 && mi > ai && zi > mi, "expected alpha < mu < zeta in output") + bucketStart := strings.Index(msg, "#### 1-3 days overdue") + require.True(t, bucketStart >= 0) + bucket := msg[bucketStart:] + + // Each reviewer header appears exactly once; alice's two bodies are nested under hers. 
+ assert.Equal(t, 1, strings.Count(bucket, "- @alice (alice-gh)\n")) + assert.Equal(t, 1, strings.Count(bucket, "- @bob (bob-gh)\n")) + assert.Contains(t, bucket, "- @alice (alice-gh)\n - o/r - [a-pr](url)\n - o/r - [b-pr](url)\n") + }) + + t.Run("groupBucketEntriesByReviewer is deterministic and sorts bodies within a group", func(t *testing.T) { + // Direct unit test on the helper: more focused than scanning the full message. + bucketEntries := []slaDigestEntry{ + {ReviewerDisplay: "@bob (bob-gh)", Body: "o/r - [b1](url)"}, + {ReviewerDisplay: "@alice (alice-gh)", Body: "o/r - [zeta](url)"}, + {ReviewerDisplay: "@alice (alice-gh)", Body: "o/r - [alpha](url)"}, + } + groups := groupBucketEntriesByReviewer(bucketEntries) + require.Len(t, groups, 2) + assert.Equal(t, "@alice (alice-gh)", groups[0].ReviewerDisplay) + assert.Equal(t, []string{"o/r - [alpha](url)", "o/r - [zeta](url)"}, groups[0].Bodies, + "bodies inside a reviewer group should be sorted alphabetically") + assert.Equal(t, "@bob (bob-gh)", groups[1].ReviewerDisplay) }) } diff --git a/server/plugin/utils.go b/server/plugin/utils.go index 04b6bea1e..4f590aa6f 100644 --- a/server/plugin/utils.go +++ b/server/plugin/utils.go @@ -382,13 +382,13 @@ func slaCalendarDiffDays(createdAt github.Timestamp, targetDays int, now time.Ti return int(dueDay.Sub(todayDay) / (24 * time.Hour)) } -// formatChannelOverdueReviewLine formats a single line for the overdue-SLA channel digest. -// reviewerDisplay is a pre-resolved name for the reviewer column (e.g. "@harrison (hmhealey)" for -// connected users or "(not connected) - hmhealey" for users without a Mattermost mapping). -// The title is rendered as a markdown link to htmlURL so readers can click straight to the PR. -// When owner/repo cannot be parsed from htmlURL, the raw URL is used as the repo display so -// nothing renders as " / ". 
-func formatChannelOverdueReviewLine(reviewerDisplay, title, htmlURL, baseURL string) string { +// formatChannelOverduePRBody formats the per-PR portion of an overdue-SLA digest line: +// "<owner>/<repo> - [<title>](<url>)". The reviewer column and list-bullet prefix are +// composed by the caller so the digest can group several of a reviewer's overdue PRs under +// a single reviewer header without duplicating their @-mention on every row. When owner/repo +// cannot be parsed from htmlURL the raw URL is used as the repo display, so nothing renders +// as a bare " / ". Long titles are truncated to 200 chars + "...". +func formatChannelOverduePRBody(title, htmlURL, baseURL string) string { owner, repo := parseOwnerAndRepo(htmlURL, baseURL) repoDisplay := fmt.Sprintf("%s/%s", owner, repo) if owner == "" || repo == "" { @@ -401,7 +401,7 @@ func formatChannelOverdueReviewLine(reviewerDisplay, title, htmlURL, baseURL str if htmlURL != "" { titleDisplay = fmt.Sprintf("[%s](%s)", escapeMarkdownLinkText(title), htmlURL) } - return fmt.Sprintf("- %s - %s - %s", reviewerDisplay, repoDisplay, titleDisplay) + return fmt.Sprintf("%s - %s", repoDisplay, titleDisplay) } // escapeMarkdownLinkText escapes characters that would break a markdown link's display text. From 95ffb55be2c844ac86ea9d3807733ffb3757b52d Mon Sep 17 00:00:00 2001 From: JG Heithcock <jgheithcock@gmail.com> Date: Tue, 28 Apr 2026 09:17:52 -0700 Subject: [PATCH 15/15] MM-68539: pin gatherReviewersForPR empty-login invariant via test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Address @nang2049's nit on PR #999 about the empty-login guard being implicit. The current code is correct: both call paths (pr.RequestedUsers and team-expanded logins) feed the shared add() closure which has `if login == "" { return }` at the top, AND the GraphQL layer in digest_query.go already drops empty user logins upstream.
But the protection isn't visible at the loop sites, and there was no test pinning the contract down — a future refactor that splits the closure or replaces the helper could quietly drop the guard. - Comment on add() now spells out that it's the single chokepoint for both paths and explains why the guard belongs there rather than being duplicated at each loop body. - New test "empty logins are dropped from both direct and team- expanded paths" exercises empty strings on each input vector at once and asserts no reviewerRequest with Login=="" can reach the caller. If a future change removes the guard from add() or puts it in only one of the two loops, this test breaks loudly. --- server/plugin/sla_digest.go | 5 +++++ server/plugin/sla_digest_test.go | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index ac781a862..a14a1d330 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -296,6 +296,11 @@ type reviewerRequest struct { func gatherReviewersForPR(pr graphql.DigestPR, resolveTeam func(graphql.DigestTeamRef) []string) []reviewerRequest { byLogin := make(map[string]int) out := make([]reviewerRequest, 0, len(pr.RequestedUsers)) + // add is the single chokepoint for both call paths below (direct RequestedUsers and + // team-expanded logins). The empty-login guard belongs here, not at the loop sites, + // because it must apply uniformly to both: the GraphQL layer (digest_query.go) already + // drops empty user logins, but a third-party resolveTeam implementation could surface + // them and we'd still want them filtered before they leak into out[idx].Login. 
add := func(login string, team *graphql.DigestTeamRef) { if login == "" { return diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go index 27527e094..196ea5850 100644 --- a/server/plugin/sla_digest_test.go +++ b/server/plugin/sla_digest_test.go @@ -316,6 +316,25 @@ func TestGatherReviewersForPR(t *testing.T) { assert.Empty(t, got) }) + t.Run("empty logins are dropped from both direct and team-expanded paths", func(t *testing.T) { + // Locks in the invariant that no reviewerRequest with Login=="" can reach the + // caller, regardless of which code path a bad value comes in on. The GraphQL + // layer already filters empty user logins, but resolveTeam is a callback whose + // implementation we don't control, so the protection has to apply uniformly. + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + pr := graphql.DigestPR{ + RequestedUsers: []string{"", "alice", ""}, + RequestedTeams: []graphql.DigestTeamRef{core}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"", "bob", ""} } + got := gatherReviewersForPR(pr, resolver) + assert.Equal(t, []string{"alice", "bob"}, logins(got), + "empty logins must be dropped from both pr.RequestedUsers and team-expanded results") + for _, r := range got { + assert.NotEmpty(t, r.Login, "no reviewerRequest may reach the caller with an empty login") + } + }) + t.Run("dedupe is case-insensitive on login", func(t *testing.T) { core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} pr := graphql.DigestPR{