diff --git a/server/plugin/graphql/digest_query.go b/server/plugin/graphql/digest_query.go new file mode 100644 index 000000000..6ab3eb4f0 --- /dev/null +++ b/server/plugin/graphql/digest_query.go @@ -0,0 +1,137 @@ +// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package graphql + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/shurcooL/githubv4" +) + +// DigestPR is a single open non-draft PR returned by GetOpenPRsWithRequestedReviewers, +// flattened from the GraphQL search response so callers don't need to know about githubv4. +type DigestPR struct { + Owner string + Repo string + Number int + Title string + URL string + CreatedAt time.Time + RequestedUsers []string + RequestedTeams []DigestTeamRef +} + +// DigestTeamRef identifies a team review request that the caller can expand to member logins. +type DigestTeamRef struct { + Org string // organization login + Slug string // team slug +} + +type digestRequestedReviewer struct { + Type githubv4.String `graphql:"__typename"` + User struct { + Login githubv4.String + } `graphql:"... on User"` + Team struct { + Slug githubv4.String + Organization struct { + Login githubv4.String + } + } `graphql:"... on Team"` +} + +type digestPRSearchNode struct { + PullRequest struct { + Number githubv4.Int + Title githubv4.String + URL githubv4.URI + CreatedAt githubv4.DateTime + Repository struct { + Name githubv4.String + Owner struct { + Login githubv4.String + } + } + ReviewRequests struct { + Nodes []struct { + RequestedReviewer digestRequestedReviewer + } + } `graphql:"reviewRequests(first:100)"` + } `graphql:"... on PullRequest"` +} + +// orgOpenPRsSearchQuery is the response shape for GetOpenPRsWithRequestedReviewers. Defined +// at package scope so the graphql library can reflect on its tags, but instantiated locally +// per call so concurrent callers (e.g. 
the digest scheduler running alongside an LHS fetch) +// don't share mutable state. +type orgOpenPRsSearchQuery struct { + Search struct { + Nodes []digestPRSearchNode + PageInfo struct { + EndCursor githubv4.String + HasNextPage bool + } + } `graphql:"search(first:100, after:$cursor, query:$query, type:ISSUE)"` +} + +// GetOpenPRsWithRequestedReviewers returns every open non-draft PR in org along with the +// users and teams currently requested for review on each one. Pages through the GitHub +// search API at 100 PRs per call. +func (c *Client) GetOpenPRsWithRequestedReviewers(ctx context.Context, org string) ([]DigestPR, error) { + if org == "" { + return nil, errors.New("org is required for org-wide PR search") + } + + query := fmt.Sprintf("is:pr is:open archived:false draft:false org:%s", org) + params := map[string]any{ + "query": githubv4.String(query), + "cursor": (*githubv4.String)(nil), + } + + var orgOpenPRsQuery orgOpenPRsSearchQuery + var out []DigestPR + for { + if err := c.executeQuery(ctx, &orgOpenPRsQuery, params); err != nil { + return nil, errors.Wrapf(err, "org-wide PR search failed for org %q", org) + } + + for _, node := range orgOpenPRsQuery.Search.Nodes { + pr := DigestPR{ + Owner: string(node.PullRequest.Repository.Owner.Login), + Repo: string(node.PullRequest.Repository.Name), + Number: int(node.PullRequest.Number), + Title: string(node.PullRequest.Title), + URL: node.PullRequest.URL.String(), + CreatedAt: node.PullRequest.CreatedAt.Time, + } + for _, rr := range node.PullRequest.ReviewRequests.Nodes { + switch string(rr.RequestedReviewer.Type) { + case "User": + if login := string(rr.RequestedReviewer.User.Login); login != "" { + pr.RequestedUsers = append(pr.RequestedUsers, login) + } + case "Team": + orgLogin := string(rr.RequestedReviewer.Team.Organization.Login) + slug := string(rr.RequestedReviewer.Team.Slug) + if orgLogin == "" { + orgLogin = org + } + if slug != "" { + pr.RequestedTeams = append(pr.RequestedTeams, 
DigestTeamRef{Org: orgLogin, Slug: slug}) + } + } + } + out = append(out, pr) + } + + if !orgOpenPRsQuery.Search.PageInfo.HasNextPage { + break + } + params["cursor"] = githubv4.NewString(orgOpenPRsQuery.Search.PageInfo.EndCursor) + } + return out, nil +} diff --git a/server/plugin/plugin.go b/server/plugin/plugin.go index 7c2035c02..2c0ff42ad 100644 --- a/server/plugin/plugin.go +++ b/server/plugin/plugin.go @@ -1137,7 +1137,7 @@ func (p *Plugin) GetToDo(ctx context.Context, info *GitHubUserInfo, githubClient for _, pr := range issueResults.Issues { line := strings.TrimSuffix(getToDoDisplayText(baseURL, pr.GetTitle(), pr.GetHTMLURL(), "", nil), "\n") - slaStart := p.effectiveReviewSLAStart(pr, baseURL, info.GitHubUsername) + slaStart := p.effectiveReviewSLAStart(prRefFromIssue(pr, baseURL), info.GitHubUsername) if suffix, _ := reviewSLAMarkdown(slaStart, targetDays, now); suffix != "" { line += suffix } diff --git a/server/plugin/review_sla.go b/server/plugin/review_sla.go index ee83b39f3..eaa4e01ea 100644 --- a/server/plugin/review_sla.go +++ b/server/plugin/review_sla.go @@ -4,8 +4,10 @@ package plugin import ( + "context" "crypto/sha256" "encoding/hex" + "sort" "strconv" "strings" "time" @@ -17,6 +19,29 @@ import ( const slaReviewReqKeyPrefix = "slarr_v1_" +// prRef is the minimal PR identity the SLA code needs: stable enough to derive the KV key, +// rich enough to fall back to PR open time when no review-request record exists. Centralizing +// this avoids forging *github.Issue values whenever a non-search caller (e.g. the digest) +// needs to ask SLA questions. +type prRef struct { + Owner string + Repo string + Number int + CreatedAt github.Timestamp +} + +// prRefFromIssue extracts a prRef from a search-result issue. Owner/repo come from the API +// response when present, otherwise from the HTML URL (mirrors issueOwnerRepo's behavior). 
+func prRefFromIssue(pr *github.Issue, baseURL string) prRef { + owner, repo := issueOwnerRepo(pr, baseURL) + return prRef{ + Owner: owner, + Repo: repo, + Number: pr.GetNumber(), + CreatedAt: pr.GetCreatedAt(), + } +} + // reviewSLAStartKey returns a stable KV key for (repo, PR, requested reviewer login). func reviewSLAStartKey(owner, repo string, prNumber int, githubLogin string) string { normalized := strings.ToLower(strings.TrimSpace(owner)) + "/" + strings.ToLower(strings.TrimSpace(repo)) + @@ -37,9 +62,14 @@ func (p *Plugin) recordReviewRequestSLAStart(event *github.PullRequestEvent, req if owner == "" || repo == "" || num == 0 || requestedGitHubLogin == "" { return } - key := reviewSLAStartKey(owner, repo, num, requestedGitHubLogin) - at := time.Now().UTC() - val := []byte(at.Format(time.RFC3339Nano)) + p.storeReviewSLAStart(owner, repo, num, requestedGitHubLogin, time.Now().UTC()) +} + +// storeReviewSLAStart writes a review-request timestamp to KV under the canonical key. Used by +// both the live webhook path and the digest's timeline backfill so the wire format matches. +func (p *Plugin) storeReviewSLAStart(owner, repo string, prNumber int, githubLogin string, t time.Time) { + key := reviewSLAStartKey(owner, repo, prNumber, githubLogin) + val := []byte(t.UTC().Format(time.RFC3339Nano)) if _, err := p.store.Set(key, val); err != nil { p.client.Log.Warn("Failed to store review SLA start time", "key", key, "error", err.Error()) } @@ -101,18 +131,147 @@ func issueOwnerRepo(pr *github.Issue, baseURL string) (owner, repo string) { return parseOwnerAndRepo(pr.GetHTMLURL(), baseURL) } -// effectiveReviewSLAStart returns the timestamp used for SLA: when we recorded a review_request webhook -// for this reviewer on this PR, else the PR created time. 
-func (p *Plugin) effectiveReviewSLAStart(pr *github.Issue, baseURL, reviewerGitHubLogin string) github.Timestamp { - owner, repo := issueOwnerRepo(pr, baseURL) - num := pr.GetNumber() - if owner == "" || repo == "" || num == 0 { - return pr.GetCreatedAt() +// effectiveReviewSLAStart returns the timestamp used for SLA: when we recorded a review_request +// webhook for this reviewer on this PR, else the PR created time. Read-only — callers serving +// user-facing requests (/github todo, RHS) can call freely without network I/O. +func (p *Plugin) effectiveReviewSLAStart(pr prRef, reviewerGitHubLogin string) github.Timestamp { + if pr.Owner == "" || pr.Repo == "" || pr.Number == 0 { + return pr.CreatedAt } - if t := p.getReviewSLAStartTime(owner, repo, num, reviewerGitHubLogin); !t.IsZero() { + if t := p.getReviewSLAStartTime(pr.Owner, pr.Repo, pr.Number, reviewerGitHubLogin); !t.IsZero() { return github.Timestamp{Time: t} } - return pr.GetCreatedAt() + return pr.CreatedAt +} + +// findMostRecentReviewRequestTime walks PR timeline events chronologically and returns the +// timestamp of the most recent surviving review_requested event for githubLogin. Returns the +// zero time if the user has no current pending request (e.g. the request was later removed +// without being re-requested). +func findMostRecentReviewRequestTime(events []*github.Timeline, githubLogin string) time.Time { + target := strings.ToLower(strings.TrimSpace(githubLogin)) + if target == "" { + return time.Time{} + } + + // Defensive sort by CreatedAt ascending; GitHub typically returns events in chronological + // order already, but pagination joins are not guaranteed to be ordered across pages. 
+ sorted := make([]*github.Timeline, 0, len(events)) + for _, e := range events { + if e == nil || e.CreatedAt == nil { + continue + } + sorted = append(sorted, e) + } + sort.SliceStable(sorted, func(i, j int) bool { + return sorted[i].CreatedAt.Before(sorted[j].CreatedAt.Time) + }) + + var current time.Time + for _, e := range sorted { + if e.Reviewer == nil { + continue + } + if strings.ToLower(e.Reviewer.GetLogin()) != target { + continue + } + switch e.GetEvent() { + case "review_requested": + current = e.CreatedAt.Time + case "review_request_removed": + current = time.Time{} + } + } + return current +} + +// findEarliestSurvivingTeamRequestTime walks PR timeline events and returns the earliest +// surviving review_requested event time across any of the given teams. "Surviving" means +// not later cancelled by a matching review_request_removed event for the same team. Returns +// the zero time when no team has a still-active request (or when teams is empty). +// +// Used as a fallback for reviewers added solely via team membership: their user-scoped +// timeline doesn't contain a review_requested event, so without this they'd fall all the way +// back to the PR's created_at and overstate days-overdue. The earliest still-active team +// request is the right anchor: if a user is in two requested teams, they have been on the +// hook since the first ask, and a later team request doesn't reset that clock. +// +// Match is on team slug only. A PR's timeline lives within a single GitHub org, and team +// slugs are unique within an org, so cross-org slug collision isn't possible here. 
+func findEarliestSurvivingTeamRequestTime(events []*github.Timeline, teams []graphql.DigestTeamRef) time.Time { + if len(teams) == 0 { + return time.Time{} + } + wantedSlugs := make(map[string]bool, len(teams)) + for _, t := range teams { + slug := strings.ToLower(strings.TrimSpace(t.Slug)) + if slug != "" { + wantedSlugs[slug] = true + } + } + if len(wantedSlugs) == 0 { + return time.Time{} + } + + sorted := make([]*github.Timeline, 0, len(events)) + for _, e := range events { + if e == nil || e.CreatedAt == nil { + continue + } + sorted = append(sorted, e) + } + sort.SliceStable(sorted, func(i, j int) bool { + return sorted[i].CreatedAt.Before(sorted[j].CreatedAt.Time) + }) + + surviving := make(map[string]time.Time) + for _, e := range sorted { + if e.RequestedTeam == nil { + continue + } + slug := strings.ToLower(e.RequestedTeam.GetSlug()) + if !wantedSlugs[slug] { + continue + } + switch e.GetEvent() { + case "review_requested": + surviving[slug] = e.CreatedAt.Time + case "review_request_removed": + delete(surviving, slug) + } + } + + var earliest time.Time + for _, t := range surviving { + if earliest.IsZero() || t.Before(earliest) { + earliest = t + } + } + return earliest +} + +// fetchPRTimeline returns every timeline event for (owner, repo, prNumber), paging until done. +// Pulled out of the digest's backfill so it can be cached at a higher level (one fetch per PR +// even when many reviewers on the same PR need backfilling). +func fetchPRTimeline(ctx context.Context, gh *github.Client, owner, repo string, prNumber int) ([]*github.Timeline, error) { + if gh == nil || owner == "" || repo == "" || prNumber == 0 { + return nil, nil + } + + var events []*github.Timeline + opts := &github.ListOptions{PerPage: 100} + for { + page, resp, err := gh.Issues.ListIssueTimeline(ctx, owner, repo, prNumber, opts) + if err != nil { + return nil, err + } + events = append(events, page...) 
+ if resp == nil || resp.NextPage == 0 { + break + } + opts.Page = resp.NextPage + } + return events, nil } // enrichReviewsWithSLAStart sets review_sla_start on LHS review items so the webapp can match server SLA logic. @@ -126,7 +285,7 @@ func (p *Plugin) enrichReviewsWithSLAStart(reviews []*graphql.GithubPRDetails, r if d == nil || d.Issue == nil { continue } - eff := p.effectiveReviewSLAStart(d.Issue, baseURL, reviewerLogin) + eff := p.effectiveReviewSLAStart(prRefFromIssue(d.Issue, baseURL), reviewerLogin) if eff.IsZero() { continue } diff --git a/server/plugin/review_sla_test.go b/server/plugin/review_sla_test.go index 55dec9e08..562d8c8af 100644 --- a/server/plugin/review_sla_test.go +++ b/server/plugin/review_sla_test.go @@ -5,8 +5,12 @@ package plugin import ( "testing" + "time" + "github.com/google/go-github/v54/github" "github.com/stretchr/testify/assert" + + "github.com/mattermost/mattermost-plugin-github/server/plugin/graphql" ) func TestReviewSLAStartKeyStable(t *testing.T) { @@ -17,3 +21,229 @@ func TestReviewSLAStartKeyStable(t *testing.T) { k3 := reviewSLAStartKey("Mattermost", "mattermost", 99999, "octocat") assert.NotEqual(t, k1, k3) } + +func TestPrRefFromIssue(t *testing.T) { + const baseURL = "https://github.com/" + createdAt := github.Timestamp{Time: time.Date(2026, 4, 20, 10, 0, 0, 0, time.UTC)} + + t.Run("prefers explicit Repository fields", func(t *testing.T) { + pr := &github.Issue{ + Number: github.Int(42), + HTMLURL: github.String("https://github.com/wrong/wrong/pull/42"), + CreatedAt: &createdAt, + Repository: &github.Repository{ + Name: github.String("right"), + Owner: &github.User{Login: github.String("owner")}, + }, + } + got := prRefFromIssue(pr, baseURL) + assert.Equal(t, prRef{Owner: "owner", Repo: "right", Number: 42, CreatedAt: createdAt}, got) + }) + + t.Run("falls back to HTMLURL parsing when Repository is missing", func(t *testing.T) { + pr := &github.Issue{ + Number: github.Int(7), + HTMLURL: 
github.String("https://github.com/mattermost/plugin-github/pull/7"), + CreatedAt: &createdAt, + } + got := prRefFromIssue(pr, baseURL) + assert.Equal(t, prRef{Owner: "mattermost", Repo: "plugin-github", Number: 7, CreatedAt: createdAt}, got) + }) +} + +func TestFindMostRecentReviewRequestTime(t *testing.T) { + user := func(login string) *github.User { return &github.User{Login: github.String(login)} } + at := func(s string) *github.Timestamp { + ts, err := time.Parse(time.RFC3339, s) + if err != nil { + t.Fatalf("bad timestamp %q: %v", s, err) + } + return &github.Timestamp{Time: ts} + } + ev := func(name, login, ts string) *github.Timeline { + return &github.Timeline{ + Event: github.String(name), + Reviewer: user(login), + CreatedAt: at(ts), + } + } + + t.Run("no events returns zero", func(t *testing.T) { + got := findMostRecentReviewRequestTime(nil, "octocat") + assert.True(t, got.IsZero()) + }) + + t.Run("single review_requested returns its timestamp", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("login match is case-insensitive", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "OctoCat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("review_request_removed after request invalidates pending", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + ev("review_request_removed", "octocat", "2026-04-21T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.True(t, got.IsZero(), "removed request should leave no pending start") + }) + + t.Run("re-request after remove uses the most recent request", func(t 
*testing.T) { + events := []*github.Timeline{ + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + ev("review_request_removed", "octocat", "2026-04-21T10:00:00Z"), + ev("review_requested", "octocat", "2026-04-22T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("events for other reviewers are ignored", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "alice", "2026-04-25T10:00:00Z"), + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("out-of-order pages are sorted defensively", func(t *testing.T) { + events := []*github.Timeline{ + ev("review_requested", "octocat", "2026-04-22T10:00:00Z"), + ev("review_request_removed", "octocat", "2026-04-21T10:00:00Z"), + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("non-review-request events are ignored", func(t *testing.T) { + events := []*github.Timeline{ + ev("commented", "octocat", "2026-04-25T10:00:00Z"), + ev("labeled", "octocat", "2026-04-26T10:00:00Z"), + ev("review_requested", "octocat", "2026-04-20T10:00:00Z"), + } + got := findMostRecentReviewRequestTime(events, "octocat") + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) +} + +func TestFindEarliestSurvivingTeamRequestTime(t *testing.T) { + at := func(s string) *github.Timestamp { + ts, err := time.Parse(time.RFC3339, s) + if err != nil { + t.Fatalf("bad timestamp %q: %v", s, err) + } + return &github.Timestamp{Time: ts} + } + teamEv := func(name, slug, ts string) *github.Timeline { + return &github.Timeline{ + Event: github.String(name), + RequestedTeam: &github.Team{Slug: 
github.String(slug)}, + CreatedAt: at(ts), + } + } + + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + platform := graphql.DigestTeamRef{Org: "mattermost", Slug: "platform"} + + t.Run("empty teams returns zero", func(t *testing.T) { + got := findEarliestSurvivingTeamRequestTime(nil, nil) + assert.True(t, got.IsZero()) + }) + + t.Run("single team request returns its timestamp", func(t *testing.T) { + events := []*github.Timeline{teamEv("review_requested", "core", "2026-04-20T10:00:00Z")} + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("removed team request leaves no surviving anchor", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "core", "2026-04-20T10:00:00Z"), + teamEv("review_request_removed", "core", "2026-04-21T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.True(t, got.IsZero()) + }) + + t.Run("re-request after remove uses the most recent request for that team", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "core", "2026-04-20T10:00:00Z"), + teamEv("review_request_removed", "core", "2026-04-21T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("two surviving teams return the earlier ask", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "platform", "2026-04-15T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core, platform}) + assert.Equal(t, "2026-04-15T10:00:00Z", got.Format(time.RFC3339), + "reviewer in two requested teams has been on the hook since the 
EARLIEST surviving ask") + }) + + t.Run("removed team is excluded from the earliest calculation", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "platform", "2026-04-15T10:00:00Z"), + teamEv("review_request_removed", "platform", "2026-04-16T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core, platform}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("events for non-requested teams are ignored", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "other-team", "2026-04-15T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("user-scoped events with nil RequestedTeam are skipped", func(t *testing.T) { + userEv := &github.Timeline{ + Event: github.String("review_requested"), + Reviewer: &github.User{Login: github.String("alice")}, + CreatedAt: at("2026-04-15T10:00:00Z"), + } + events := []*github.Timeline{ + userEv, + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339)) + }) + + t.Run("out-of-order pages are sorted defensively", func(t *testing.T) { + events := []*github.Timeline{ + teamEv("review_requested", "core", "2026-04-22T10:00:00Z"), + teamEv("review_request_removed", "core", "2026-04-21T10:00:00Z"), + teamEv("review_requested", "core", "2026-04-20T10:00:00Z"), + } + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{core}) + assert.Equal(t, "2026-04-22T10:00:00Z", got.Format(time.RFC3339), + "defensive sort means the latest event wins after re-request, even if pages arrived 
out of order") + }) + + t.Run("team slug match is case-insensitive", func(t *testing.T) { + events := []*github.Timeline{teamEv("review_requested", "Core", "2026-04-20T10:00:00Z")} + got := findEarliestSurvivingTeamRequestTime(events, []graphql.DigestTeamRef{{Slug: "CORE"}}) + assert.Equal(t, "2026-04-20T10:00:00Z", got.Format(time.RFC3339)) + }) +} diff --git a/server/plugin/sla_digest.go b/server/plugin/sla_digest.go index eae14f275..a14a1d330 100644 --- a/server/plugin/sla_digest.go +++ b/server/plugin/sla_digest.go @@ -7,15 +7,17 @@ import ( "context" "fmt" "sort" + "strconv" "strings" "time" "github.com/google/go-github/v54/github" + "github.com/mattermost/mattermost-plugin-github/server/plugin/graphql" + "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/pluginapi" "github.com/mattermost/mattermost/server/public/pluginapi/cluster" - "golang.org/x/oauth2" ) const ( @@ -24,9 +26,13 @@ const ( slaDigestMutexKey = "github_sla_digest_mutex" ) +// slaDigestEntry is one (reviewer, PR) pair past SLA. Stored as separate fields rather than +// a pre-baked single line so buildSLADigestMessage can group entries by reviewer within each +// bucket and avoid repeating the @-mention on every row. type slaDigestEntry struct { - DaysOverdue int - Line string + DaysOverdue int + ReviewerDisplay string // e.g. "@harrison (hmhealey)" or "(not connected) - hmhealey" + Body string // e.g. "mattermost/mattermost - [Fix race condition](url)" } // maybePostDailyOverdueSLADigest posts one aggregated message per local calendar day to the @@ -63,7 +69,14 @@ func (p *Plugin) maybePostDailyOverdueSLADigest(ctx context.Context) { return } - entries := p.collectAllOverdueSLAItems(ctx) + entries, ok := p.collectAllOverdueSLAItems(ctx) + if !ok { + // Distinguishes "digest could not complete a real scan" (config issue, no service user, + // or every configured org's GraphQL fetch failed) from "scan ran and found nothing + // overdue." 
We deliberately do NOT advance slaDigestDayKVKey here so the 5-minute + // scheduler retries within the same local day instead of skipping until tomorrow. + return + } if len(entries) == 0 { if _, err := p.store.Set(slaDigestDayKVKey, []byte(day)); err != nil { p.client.Log.Warn("Failed to store SLA digest day marker", "error", err.Error()) @@ -71,7 +84,7 @@ func (p *Plugin) maybePostDailyOverdueSLADigest(ctx context.Context) { return } - msg := buildSLADigestMessage(entries) + msg := buildSLADigestMessage(entries, cfg.ReviewTargetDays) post := &model.Post{ ChannelId: cfg.OverdueReviewsChannelID, UserId: p.BotUserID, @@ -87,113 +100,448 @@ func (p *Plugin) maybePostDailyOverdueSLADigest(ctx context.Context) { } } -func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) []slaDigestEntry { - config := p.getConfiguration() - targetDays := config.ReviewTargetDays - baseURL := config.getBaseURL() - orgList := config.getOrganizations() - now := time.Now() +// resolveReviewerDisplayName turns a GitHub login into the digest's reviewer column. +// Connected users get an actual Mattermost @-mention so the post notifies them; unconnected +// users render as "(not connected) - " so admins can see who is missing. +func (p *Plugin) resolveReviewerDisplayName(githubLogin string) string { + if githubLogin == "" { + return "" + } + mmUsername := p.getGitHubToUsernameMapping(githubLogin) + if mmUsername != "" { + return fmt.Sprintf("@%s (%s)", mmUsername, githubLogin) + } + return fmt.Sprintf("(not connected) - %s", githubLogin) +} +// pickServiceGitHubUser returns the first connected user the digest can use as a service +// caller for org-wide GitHub queries. Keys are sorted before iteration so the choice is +// deterministic across runs (and across cluster nodes) — otherwise the digest's visibility +// into private repos/teams could silently shift run-to-run with KV iteration order. 
+// +// Returns nil when no connected user is available; the digest cannot run in that case. +func (p *Plugin) pickServiceGitHubUser(ctx context.Context) *GitHubUserInfo { checker := func(key string) (bool, error) { return strings.HasSuffix(key, githubTokenKey), nil } - var out []slaDigestEntry - + var allKeys []string for page := 0; ; page++ { if ctx.Err() != nil { - return out + return nil } - keys, err := p.store.ListKeys(page, keysPerPage, pluginapi.WithChecker(checker)) if err != nil { - p.client.Log.Warn("Failed to list keys for SLA digest", "error", err.Error()) + p.client.Log.Warn("SLA digest failed to list connected users", "error", err.Error()) + return nil + } + allKeys = append(allKeys, keys...) + if len(keys) < keysPerPage { break } + } + sort.Strings(allKeys) - for _, key := range keys { - if ctx.Err() != nil { - return out - } + for _, key := range allKeys { + if ctx.Err() != nil { + return nil + } + userID := strings.TrimSuffix(key, githubTokenKey) + ghInfo, apiErr := p.getGitHubUserInfo(userID) + if apiErr != nil || ghInfo == nil { + continue + } + return ghInfo + } + return nil +} - userID := strings.TrimSuffix(key, githubTokenKey) - ghInfo, apiErr := p.getGitHubUserInfo(userID) - if apiErr != nil || ghInfo == nil { - time.Sleep(delayBetweenUsers) - continue +// collectAllOverdueSLAItems returns the digest's overdue entries along with an "ok" flag the +// caller uses to decide whether to advance the daily marker. ok is false when the digest +// could not complete a real scan (no orgs configured, no connected service user, or every +// configured org's GraphQL fetch failed); the caller should retry on the next scheduler tick +// rather than treat that as "ran successfully and found nothing." A successful scan returns +// ok=true even when entries is empty. 
+func (p *Plugin) collectAllOverdueSLAItems(ctx context.Context) ([]slaDigestEntry, bool) { + config := p.getConfiguration() + targetDays := config.ReviewTargetDays + orgList := config.getOrganizations() + now := time.Now() + + if len(orgList) == 0 { + p.client.Log.Warn("SLA digest cannot run without configured organizations (System Console -> Plugins -> GitHub -> GitHub Organizations)") + return nil, false + } + + serviceUser := p.pickServiceGitHubUser(ctx) + if serviceUser == nil { + p.client.Log.Warn("SLA digest cannot run: no connected GitHub user available to act as the service caller") + return nil, false + } + + githubClient := p.githubConnectUser(ctx, serviceUser) + graphQLClient := p.graphQLConnect(serviceUser) + + allPRs, anyOrgOK := p.fetchAllOrgOpenPRs(ctx, graphQLClient, orgList) + if !anyOrgOK { + p.client.Log.Warn("SLA digest cannot run: no configured organization returned a successful PR search") + return nil, false + } + if len(allPRs) == 0 { + return nil, true + } + + resolveTeam := newTeamMemberResolver(ctx, githubClient, p.client.Log) + resolveSLAStart, summarizeSLAStart := p.newDigestSLAStartResolver(ctx, githubClient) + defer summarizeSLAStart() + + out := make([]slaDigestEntry, 0, len(allPRs)) + seen := make(map[string]bool) + for _, pr := range allPRs { + if ctx.Err() != nil { + // A canceled context means the scan was interrupted partway through (e.g. + // scheduler shutdown). Whatever we've accumulated is a partial view, so we + // must not let the caller treat it as a real scan and advance the day + // marker. Return ok=false so the next scheduler tick retries. 
+ return nil, false + } + ref := prRef{ + Owner: pr.Owner, + Repo: pr.Repo, + Number: pr.Number, + CreatedAt: github.Timestamp{Time: pr.CreatedAt}, + } + for _, rr := range gatherReviewersForPR(pr, resolveTeam) { + entry := p.evaluateOverdueForReviewer(ref, pr, rr, targetDays, now, seen, resolveSLAStart) + if entry != nil { + out = append(out, *entry) } + } + } + return out, true +} - githubClient := p.githubConnectUser(ctx, ghInfo) - var allIssues []*github.Issue - cErr := p.useGitHubClient(ghInfo, func(gi *GitHubUserInfo, token *oauth2.Token) error { - query := getReviewSearchQuery(gi.GitHubUsername, orgList) - opts := &github.SearchOptions{ListOptions: github.ListOptions{PerPage: 100}} - for { - result, resp, searchErr := githubClient.Search.Issues(ctx, query, opts) - if searchErr != nil { - return searchErr - } - allIssues = append(allIssues, result.Issues...) - if resp.NextPage == 0 { - break - } - opts.Page = resp.NextPage +// fetchAllOrgOpenPRs runs the org-wide PR search once per configured org, logging and skipping +// orgs that fail rather than aborting the whole digest. Returns the combined PR list and a +// flag that is true iff every configured org was visited and at least one returned +// successfully (a zero-PR org is still a success). A context cancellation mid-iteration +// forces anyOK to false: the caller uses anyOK to distinguish a "real but quiet scan" from +// an interrupted one, and a partial walk is the latter even if some orgs already responded. +func (p *Plugin) fetchAllOrgOpenPRs(ctx context.Context, graphQLClient *graphql.Client, orgList []string) (allPRs []graphql.DigestPR, anyOK bool) { + for _, org := range orgList { + if ctx.Err() != nil { + return nil, false + } + prs, err := graphQLClient.GetOpenPRsWithRequestedReviewers(ctx, org) + if err != nil { + p.client.Log.Warn("SLA digest org PR fetch failed", "org", org, "error", err.Error()) + continue + } + anyOK = true + allPRs = append(allPRs, prs...) 
+ } + return allPRs, anyOK +} + +// newTeamMemberResolver returns a closure that expands org/team references to member logins, +// memoizing the result so the same team is fetched at most once per digest run. Keys are +// lowercased so case differences in the GraphQL response don't fragment the cache. +func newTeamMemberResolver(ctx context.Context, githubClient *github.Client, log pluginapi.LogService) func(graphql.DigestTeamRef) []string { + cache := make(map[string][]string) + return func(team graphql.DigestTeamRef) []string { + key := strings.ToLower(team.Org + "/" + team.Slug) + if members, ok := cache[key]; ok { + return members + } + var members []string + opts := &github.TeamListTeamMembersOptions{ListOptions: github.ListOptions{PerPage: 100}} + for { + page, resp, err := githubClient.Teams.ListTeamMembersBySlug(ctx, team.Org, team.Slug, opts) + if err != nil { + log.Debug("SLA digest team expansion failed", "team", key, "error", err.Error()) + break + } + for _, u := range page { + if login := u.GetLogin(); login != "" { + members = append(members, login) } - return nil - }) - if cErr != nil { - p.client.Log.Debug("SLA digest skipped user review search", "user_id", userID, "error", cErr.Error()) - time.Sleep(delayBetweenUsers) - continue } + if resp == nil || resp.NextPage == 0 { + break + } + opts.Page = resp.NextPage + } + cache[key] = members + return members + } +} - for _, pr := range allIssues { - slaStart := p.effectiveReviewSLAStart(pr, baseURL, ghInfo.GitHubUsername) - diff := slaCalendarDiffDays(slaStart, targetDays, now) - if diff >= 0 { - continue - } - daysOverdue := -diff - line := formatChannelOverdueReviewLine(ghInfo.GitHubUsername, pr.GetTitle(), pr.GetHTMLURL(), baseURL) - out = append(out, slaDigestEntry{DaysOverdue: daysOverdue, Line: line}) +// reviewerRequest preserves a reviewer login alongside the team(s) they were added through, so +// the SLA backfill can fall back to a team-scoped review_requested timeline event when the +// 
reviewer has no user-scoped request. Direct (non-team) requests carry an empty Teams slice. +// +// Each login appears at most once: a user who is both directly requested and a member of one +// or more requested teams aggregates into a single entry whose Teams slice records every team +// they were also part of (used as a tie-break only when the user-scoped lookup is empty). +type reviewerRequest struct { + Login string + Teams []graphql.DigestTeamRef +} + +// gatherReviewersForPR collapses a PR's directly-requested users and all members of its +// requested teams into a deduplicated reviewer set, preserving each reviewer's team origin so +// the SLA backfill can use a team-scoped review_requested event when no user-scoped event is +// available. Logins are matched case-insensitively for deduplication; the first-seen casing is +// preserved on the returned struct. +func gatherReviewersForPR(pr graphql.DigestPR, resolveTeam func(graphql.DigestTeamRef) []string) []reviewerRequest { + byLogin := make(map[string]int) + out := make([]reviewerRequest, 0, len(pr.RequestedUsers)) + // add is the single chokepoint for both call paths below (direct RequestedUsers and + // team-expanded logins). The empty-login guard belongs here, not at the loop sites, + // because it must apply uniformly to both: the GraphQL layer (digest_query.go) already + // drops empty user logins, but a third-party resolveTeam implementation could surface + // them and we'd still want them filtered before they leak into out[idx].Login. 
+ add := func(login string, team *graphql.DigestTeamRef) { + if login == "" { + return + } + key := strings.ToLower(login) + idx, ok := byLogin[key] + if !ok { + out = append(out, reviewerRequest{Login: login}) + idx = len(out) - 1 + byLogin[key] = idx + } + if team != nil { + out[idx].Teams = append(out[idx].Teams, *team) + } + } + for _, login := range pr.RequestedUsers { + add(login, nil) + } + for _, team := range pr.RequestedTeams { + for _, login := range resolveTeam(team) { + add(login, &team) + } + } + return out +} + +// newDigestSLAStartResolver returns a closure that resolves a reviewer's SLA-start time for +// the digest, with three layers of lookup: +// +// 1. The KV record from the live review_requested webhook (free). +// 2. A user-scoped review_requested event from the PR timeline. Timeline pages are fetched +// at most once per PR per digest run and shared across reviewers on the same PR. +// 3. For reviewers added via team membership only, the earliest surviving team-scoped +// review_requested event. This avoids overstating overdue days for users who are pending +// solely because their team was requested. +// +// Resolved times (from either timeline path) are written back to KV under the user-scoped +// key so subsequent runs hit the fast path. Falls back to the PR open time when no +// review_requested event is found, matching the read-only effectiveReviewSLAStart contract. +// +// Also returns a summarize() callback the caller must invoke once the resolver is no longer +// in use (typically via defer). It logs cumulative cache hit / timeline-fetch counts at Info +// level so admins can verify the backfill is healthy across runs (see Day-1-vs-steady-state +// patterns in the docs). The callback is a no-op when neither counter moved, so digests that +// reach this point but never actually call the resolver (e.g. PRs with no requested +// reviewers) don't emit a misleading "0,0" line. 
+func (p *Plugin) newDigestSLAStartResolver(ctx context.Context, gh *github.Client) (resolve func(prRef, reviewerRequest) github.Timestamp, summarize func()) { + type prKey struct { + owner string + repo string + num int + } + timelineCache := make(map[prKey][]*github.Timeline) + timelineFetched := make(map[prKey]bool) + var fetched, hits int + + resolve = func(pr prRef, rr reviewerRequest) github.Timestamp { + if pr.Owner == "" || pr.Repo == "" || pr.Number == 0 { + return pr.CreatedAt + } + if t := p.getReviewSLAStartTime(pr.Owner, pr.Repo, pr.Number, rr.Login); !t.IsZero() { + hits++ + return github.Timestamp{Time: t} + } + if gh == nil { + return pr.CreatedAt + } + + key := prKey{pr.Owner, pr.Repo, pr.Number} + events, ok := timelineCache[key] + if !ok && !timelineFetched[key] { + var err error + events, err = fetchPRTimeline(ctx, gh, pr.Owner, pr.Repo, pr.Number) + if err != nil { + p.client.Log.Debug("SLA digest timeline fetch failed; falling back to PR created_at", + "owner", pr.Owner, "repo", pr.Repo, "pr", pr.Number, "error", err.Error()) + events = nil } + timelineCache[key] = events + timelineFetched[key] = true + fetched++ + } - time.Sleep(delayBetweenUsers) + if found := findMostRecentReviewRequestTime(events, rr.Login); !found.IsZero() { + p.storeReviewSLAStart(pr.Owner, pr.Repo, pr.Number, rr.Login, found) + return github.Timestamp{Time: found.UTC()} } + // No user-scoped event. For team-membership requests, anchor to the earliest still- + // surviving team request: the user has been on the hook since the first such ask. 
+ if found := findEarliestSurvivingTeamRequestTime(events, rr.Teams); !found.IsZero() { + p.storeReviewSLAStart(pr.Owner, pr.Repo, pr.Number, rr.Login, found) + return github.Timestamp{Time: found.UTC()} + } + return pr.CreatedAt + } - if len(keys) < keysPerPage { - break + summarize = func() { + if fetched == 0 && hits == 0 { + return } + p.client.Log.Info("SLA digest backfill summary", + "timeline_pages_fetched", fetched, + "kv_hits", hits) } - return out + return resolve, summarize } -func buildSLADigestMessage(entries []slaDigestEntry) string { - buckets := make(map[int][]string) +// evaluateOverdueForReviewer returns a digest entry for (pr, reviewer) when the reviewer is +// past SLA, or nil when not overdue, already accounted for, or missing identity. seen is +// mutated to record the (pr, login) pair. gatherReviewersForPR already deduplicates a single +// PR's reviewer set, but seen guards against any unexpected duplicates that slip through +// (e.g. case-mismatched names from different code paths). +func (p *Plugin) evaluateOverdueForReviewer( + ref prRef, + pr graphql.DigestPR, + rr reviewerRequest, + targetDays int, + now time.Time, + seen map[string]bool, + resolveSLAStart func(prRef, reviewerRequest) github.Timestamp, +) *slaDigestEntry { + if rr.Login == "" { + return nil + } + dedupeKey := pr.Owner + "/" + pr.Repo + "#" + strconv.Itoa(pr.Number) + "@" + strings.ToLower(rr.Login) + if seen[dedupeKey] { + return nil + } + seen[dedupeKey] = true + + slaStart := resolveSLAStart(ref, rr) + diff := slaCalendarDiffDays(slaStart, targetDays, now) + if diff >= 0 { + return nil + } + + reviewerDisplay := p.resolveReviewerDisplayName(rr.Login) + body := formatChannelOverduePRBody(pr.Title, pr.URL, p.getConfiguration().getBaseURL()) + return &slaDigestEntry{DaysOverdue: -diff, ReviewerDisplay: reviewerDisplay, Body: body} +} + +// slaBuckets enumerates the digest's overdue buckets in display order (most overdue first). 
+// Only non-empty buckets emit a header; empty buckets are omitted from the output entirely.
+var slaBuckets = []struct {
+	label string
+	min   int // inclusive lower bound in days overdue
+	max   int // inclusive upper bound; -1 marks the open-ended top bucket
+}{
+	{label: "More than 1 year overdue", min: 366, max: -1},
+	{label: "91-365 days overdue", min: 91, max: 365},
+	{label: "31-90 days overdue", min: 31, max: 90},
+	{label: "15-30 days overdue", min: 15, max: 30},
+	{label: "8-14 days overdue", min: 8, max: 14},
+	{label: "4-7 days overdue", min: 4, max: 7},
+	{label: "1-3 days overdue", min: 1, max: 3},
+}
+
+// slaBucketIndex returns the index into slaBuckets for the given days-overdue value, or -1 if not overdue.
+func slaBucketIndex(daysOverdue int) int {
+	if daysOverdue < 1 {
+		return -1
+	}
+	for i, b := range slaBuckets {
+		if b.max == -1 {
+			if daysOverdue >= b.min {
+				return i
+			}
+			continue
+		}
+		if daysOverdue >= b.min && daysOverdue <= b.max {
+			return i
+		}
+	}
+	return -1
+}
+
+// reviewerBucketGroup is one reviewer's overdue PRs within a single bucket, used by
+// buildSLADigestMessage to render `- <reviewer>\n  - <pr>\n  - <pr>` instead of
+// repeating the reviewer prefix on every PR row.
+type reviewerBucketGroup struct {
+	ReviewerDisplay string
+	Bodies          []string
+}
+
+// groupBucketEntriesByReviewer collapses a bucket's entries into one group per reviewer,
+// sorts bodies within each group, and sorts the groups themselves by reviewer display
+// (case-insensitive). The deterministic ordering is required for stable channel posts and
+// snapshot-style assertions; map iteration order alone would leave the output flaky.
+func groupBucketEntriesByReviewer(entries []slaDigestEntry) []reviewerBucketGroup { + idxByReviewer := make(map[string]int, len(entries)) + out := make([]reviewerBucketGroup, 0, len(entries)) for _, e := range entries { - buckets[e.DaysOverdue] = append(buckets[e.DaysOverdue], e.Line) + idx, ok := idxByReviewer[e.ReviewerDisplay] + if !ok { + out = append(out, reviewerBucketGroup{ReviewerDisplay: e.ReviewerDisplay}) + idx = len(out) - 1 + idxByReviewer[e.ReviewerDisplay] = idx + } + out[idx].Bodies = append(out[idx].Bodies, e.Body) + } + for i := range out { + sort.Strings(out[i].Bodies) } + sort.SliceStable(out, func(i, j int) bool { + return strings.ToLower(out[i].ReviewerDisplay) < strings.ToLower(out[j].ReviewerDisplay) + }) + return out +} - days := make([]int, 0, len(buckets)) - for d := range buckets { - days = append(days, d) +func buildSLADigestMessage(entries []slaDigestEntry, targetDays int) string { + bucketEntries := make([][]slaDigestEntry, len(slaBuckets)) + for _, e := range entries { + idx := slaBucketIndex(e.DaysOverdue) + if idx < 0 { + continue + } + bucketEntries[idx] = append(bucketEntries[idx], e) } - sort.Sort(sort.Reverse(sort.IntSlice(days))) var b strings.Builder - b.WriteString("### Pull request reviews past SLA\n\n") - for _, d := range days { - lines := buckets[d] - sort.Strings(lines) + if targetDays > 0 { unit := "days" - if d == 1 { + if targetDays == 1 { unit = "day" } - fmt.Fprintf(&b, "#### %d %s overdue\n", d, unit) - for _, line := range lines { - b.WriteString(line) - b.WriteString("\n") + fmt.Fprintf(&b, "### Pull request reviews past SLA (target: %d %s from most recent review request)\n\n", targetDays, unit) + } else { + b.WriteString("### Pull request reviews past SLA\n\n") + } + for i, bucket := range slaBuckets { + bes := bucketEntries[i] + if len(bes) == 0 { + continue + } + fmt.Fprintf(&b, "#### %s\n", bucket.label) + for _, g := range groupBucketEntriesByReviewer(bes) { + fmt.Fprintf(&b, "- %s\n", g.ReviewerDisplay) + 
for _, body := range g.Bodies { + fmt.Fprintf(&b, " - %s\n", body) + } } b.WriteString("\n") } diff --git a/server/plugin/sla_digest_test.go b/server/plugin/sla_digest_test.go new file mode 100644 index 000000000..196ea5850 --- /dev/null +++ b/server/plugin/sla_digest_test.go @@ -0,0 +1,546 @@ +// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package plugin + +import ( + "context" + "encoding/json" + "errors" + "strings" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/google/go-github/v54/github" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "golang.org/x/oauth2" + + "github.com/mattermost/mattermost/server/public/plugin/plugintest" + "github.com/mattermost/mattermost/server/public/pluginapi" + + "github.com/mattermost/mattermost-plugin-github/server/mocks" + "github.com/mattermost/mattermost-plugin-github/server/plugin/graphql" +) + +func TestFormatChannelOverduePRBody(t *testing.T) { + const baseURL = "https://github.com/" + + t.Run("renders owner/repo and a markdown-linked title", func(t *testing.T) { + body := formatChannelOverduePRBody( + "Fix race condition", + "https://github.com/mattermost/mattermost/pull/12345", + baseURL, + ) + assert.Equal(t, "mattermost/mattermost - [Fix race condition](https://github.com/mattermost/mattermost/pull/12345)", body) + }) + + t.Run("escapes closing brackets in the title so the link does not terminate early", func(t *testing.T) { + body := formatChannelOverduePRBody( + "[MM-12345] Fix [thing]", + "https://github.com/mattermost/mattermost/pull/1", + baseURL, + ) + // Only `]` needs escaping inside a markdown link's display text; `[` is allowed. 
+ assert.Contains(t, body, `[[MM-12345\] Fix [thing\]](https://github.com/mattermost/mattermost/pull/1)`) + }) + + t.Run("falls back to raw URL as the repo display when owner/repo cannot be parsed", func(t *testing.T) { + body := formatChannelOverduePRBody( + "Title", + "https://example.invalid/something/odd", + baseURL, + ) + assert.Contains(t, body, "https://example.invalid/something/odd - [Title]") + }) + + t.Run("truncates very long titles before linking", func(t *testing.T) { + title := strings.Repeat("a", 250) + body := formatChannelOverduePRBody( + title, + "https://github.com/mattermost/mattermost/pull/1", + baseURL, + ) + assert.Contains(t, body, "...](https://github.com/mattermost/mattermost/pull/1)") + // Title text inside the link should not exceed 200 'a's followed by the ellipsis. + assert.NotContains(t, body, strings.Repeat("a", 201)) + }) + + t.Run("does not include a leading bullet or reviewer prefix", func(t *testing.T) { + // Locks in the contract that the per-PR body is composable: the digest builder + // is responsible for the `- ` outer bullet and the reviewer header, not this helper. 
+ body := formatChannelOverduePRBody("Fix", "https://github.com/m/m/pull/1", baseURL) + assert.False(t, strings.HasPrefix(body, "- "), "body should be composable, not pre-bulleted") + assert.False(t, strings.HasPrefix(body, "@"), "body should not carry a reviewer prefix") + }) +} + +func TestSLABucketIndex(t *testing.T) { + cases := []struct { + daysOverdue int + wantLabel string + }{ + {0, ""}, + {-3, ""}, + {1, "1-3 days overdue"}, + {3, "1-3 days overdue"}, + {4, "4-7 days overdue"}, + {7, "4-7 days overdue"}, + {8, "8-14 days overdue"}, + {14, "8-14 days overdue"}, + {15, "15-30 days overdue"}, + {30, "15-30 days overdue"}, + {31, "31-90 days overdue"}, + {90, "31-90 days overdue"}, + {91, "91-365 days overdue"}, + {365, "91-365 days overdue"}, + {366, "More than 1 year overdue"}, + {1000, "More than 1 year overdue"}, + } + + for _, tc := range cases { + idx := slaBucketIndex(tc.daysOverdue) + if tc.wantLabel == "" { + assert.Equal(t, -1, idx, "daysOverdue=%d expected no bucket", tc.daysOverdue) + continue + } + if assert.NotEqual(t, -1, idx, "daysOverdue=%d expected a bucket", tc.daysOverdue) { + assert.Equal(t, tc.wantLabel, slaBuckets[idx].label, "daysOverdue=%d", tc.daysOverdue) + } + } +} + +func TestBuildSLADigestMessage(t *testing.T) { + entry := func(days int, reviewer, body string) slaDigestEntry { + return slaDigestEntry{DaysOverdue: days, ReviewerDisplay: reviewer, Body: body} + } + + t.Run("groups entries into the correct buckets in display order", func(t *testing.T) { + entries := []slaDigestEntry{ + entry(2, "@a (a-gh)", "owner/repo - [A](url)"), + entry(5, "@b (b-gh)", "owner/repo - [B](url)"), + entry(12, "@c (c-gh)", "owner/repo - [C](url)"), + entry(100, "@d (d-gh)", "owner/repo - [D](url)"), + entry(400, "@e (e-gh)", "owner/repo - [E](url)"), + } + + msg := buildSLADigestMessage(entries, 3) + + assert.True(t, strings.HasPrefix(msg, "### Pull request reviews past SLA (target: 3 days from most recent review request)")) + + // Verify bucket 
order in message: most overdue first. + idxYear := strings.Index(msg, "#### More than 1 year overdue") + idx91 := strings.Index(msg, "#### 91-365 days overdue") + idx8 := strings.Index(msg, "#### 8-14 days overdue") + idx4 := strings.Index(msg, "#### 4-7 days overdue") + idx1 := strings.Index(msg, "#### 1-3 days overdue") + assert.True(t, idxYear >= 0 && idx91 > idxYear && idx8 > idx91 && idx4 > idx8 && idx1 > idx4, "buckets should appear in most-overdue-first order") + + assert.NotContains(t, msg, "#### 31-90 days overdue", "empty bucket should be omitted") + assert.NotContains(t, msg, "#### 15-30 days overdue", "empty bucket should be omitted") + }) + + t.Run("non-overdue entries are dropped", func(t *testing.T) { + entries := []slaDigestEntry{ + entry(0, "@skip (skip-gh)", "owner/repo - [skipped](url)"), + entry(-2, "@skip2 (skip2-gh)", "owner/repo - [skipped-too](url)"), + entry(1, "@keep (keep-gh)", "owner/repo - [kept](url)"), + } + msg := buildSLADigestMessage(entries, 3) + assert.Contains(t, msg, "[kept]") + assert.NotContains(t, msg, "[skipped]") + assert.NotContains(t, msg, "[skipped-too]") + }) + + t.Run("singular target days uses 'day'", func(t *testing.T) { + msg := buildSLADigestMessage([]slaDigestEntry{entry(1, "@x (x-gh)", "owner/repo - [X](url)")}, 1) + assert.Contains(t, msg, "target: 1 day from") + }) + + t.Run("zero target days falls back to plain header", func(t *testing.T) { + msg := buildSLADigestMessage([]slaDigestEntry{entry(1, "@x (x-gh)", "owner/repo - [X](url)")}, 0) + assert.True(t, strings.HasPrefix(msg, "### Pull request reviews past SLA\n")) + }) + + t.Run("reviewers within a bucket are sorted alphabetically (case-insensitive)", func(t *testing.T) { + entries := []slaDigestEntry{ + entry(2, "@Zeta (zeta-gh)", "owner/repo - [PR-z](url)"), + entry(2, "@alpha (alpha-gh)", "owner/repo - [PR-a](url)"), + entry(2, "@Mu (mu-gh)", "owner/repo - [PR-m](url)"), + } + msg := buildSLADigestMessage(entries, 3) + ai := strings.Index(msg, "@alpha") 
+ mi := strings.Index(msg, "@Mu") + zi := strings.Index(msg, "@Zeta") + assert.True(t, ai >= 0 && mi > ai && zi > mi, "expected alpha < Mu < Zeta (case-insensitive) in output") + }) + + t.Run("multiple PRs by the same reviewer in one bucket render under a single reviewer header with sorted indented bodies", func(t *testing.T) { + const reviewer = "@harrison (hmhealey)" + entries := []slaDigestEntry{ + entry(2, reviewer, "owner/repo - [zeta-pr](https://example/pr/3)"), + entry(2, reviewer, "owner/repo - [alpha-pr](https://example/pr/1)"), + entry(2, reviewer, "owner/repo - [mu-pr](https://example/pr/2)"), + } + msg := buildSLADigestMessage(entries, 3) + + // The reviewer header must appear EXACTLY once in this bucket — that's the whole + // point of grouping; otherwise the digest still @-spams the reviewer per-PR. + bucketStart := strings.Index(msg, "#### 1-3 days overdue") + require.True(t, bucketStart >= 0, "expected the 1-3 days bucket header") + bucketSlice := msg[bucketStart:] + assert.Equal(t, 1, strings.Count(bucketSlice, "- "+reviewer+"\n"), + "reviewer should appear once per bucket as the outer bullet") + + // Bodies should be indented two spaces and sorted alphabetically within the group. 
+ ai := strings.Index(bucketSlice, " - owner/repo - [alpha-pr]") + mi := strings.Index(bucketSlice, " - owner/repo - [mu-pr]") + zi := strings.Index(bucketSlice, " - owner/repo - [zeta-pr]") + assert.True(t, ai > 0 && mi > ai && zi > mi, + "PR bodies should be indented (` - `) and sorted alphabetically under the reviewer header") + }) + + t.Run("two reviewers in the same bucket render as two separate reviewer groups", func(t *testing.T) { + entries := []slaDigestEntry{ + entry(2, "@alice (alice-gh)", "o/r - [a-pr](url)"), + entry(2, "@alice (alice-gh)", "o/r - [b-pr](url)"), + entry(2, "@bob (bob-gh)", "o/r - [c-pr](url)"), + } + msg := buildSLADigestMessage(entries, 3) + bucketStart := strings.Index(msg, "#### 1-3 days overdue") + require.True(t, bucketStart >= 0) + bucket := msg[bucketStart:] + + // Each reviewer header appears exactly once; alice's two bodies are nested under hers. + assert.Equal(t, 1, strings.Count(bucket, "- @alice (alice-gh)\n")) + assert.Equal(t, 1, strings.Count(bucket, "- @bob (bob-gh)\n")) + assert.Contains(t, bucket, "- @alice (alice-gh)\n - o/r - [a-pr](url)\n - o/r - [b-pr](url)\n") + }) + + t.Run("groupBucketEntriesByReviewer is deterministic and sorts bodies within a group", func(t *testing.T) { + // Direct unit test on the helper: more focused than scanning the full message. 
+ bucketEntries := []slaDigestEntry{ + {ReviewerDisplay: "@bob (bob-gh)", Body: "o/r - [b1](url)"}, + {ReviewerDisplay: "@alice (alice-gh)", Body: "o/r - [zeta](url)"}, + {ReviewerDisplay: "@alice (alice-gh)", Body: "o/r - [alpha](url)"}, + } + groups := groupBucketEntriesByReviewer(bucketEntries) + require.Len(t, groups, 2) + assert.Equal(t, "@alice (alice-gh)", groups[0].ReviewerDisplay) + assert.Equal(t, []string{"o/r - [alpha](url)", "o/r - [zeta](url)"}, groups[0].Bodies, + "bodies inside a reviewer group should be sorted alphabetically") + assert.Equal(t, "@bob (bob-gh)", groups[1].ReviewerDisplay) + }) +} + +func TestGatherReviewersForPR(t *testing.T) { + logins := func(rrs []reviewerRequest) []string { + out := make([]string, 0, len(rrs)) + for _, r := range rrs { + out = append(out, r.Login) + } + return out + } + + t.Run("flattens direct user requests with empty Teams", func(t *testing.T) { + pr := graphql.DigestPR{ + RequestedUsers: []string{"alice", "bob"}, + } + got := gatherReviewersForPR(pr, func(graphql.DigestTeamRef) []string { return nil }) + assert.Equal(t, []string{"alice", "bob"}, logins(got)) + for _, r := range got { + assert.Empty(t, r.Teams, "direct request should carry no team origin") + } + }) + + t.Run("expands team references and tags reviewer with the originating team", func(t *testing.T) { + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + pr := graphql.DigestPR{ + RequestedUsers: []string{"alice"}, + RequestedTeams: []graphql.DigestTeamRef{core}, + } + resolver := func(team graphql.DigestTeamRef) []string { + if team.Slug == "core" { + return []string{"bob", "carol"} + } + return nil + } + got := gatherReviewersForPR(pr, resolver) + assert.Equal(t, []string{"alice", "bob", "carol"}, logins(got)) + assert.Empty(t, got[0].Teams, "alice was directly requested, not via team") + assert.Equal(t, []graphql.DigestTeamRef{core}, got[1].Teams) + assert.Equal(t, []graphql.DigestTeamRef{core}, got[2].Teams) + }) + + t.Run("user 
in both direct list and team list is deduplicated to one entry retaining team origin", func(t *testing.T) { + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + pr := graphql.DigestPR{ + RequestedUsers: []string{"bob"}, + RequestedTeams: []graphql.DigestTeamRef{core}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"bob"} } + got := gatherReviewersForPR(pr, resolver) + require.Len(t, got, 1, "bob should be merged into a single entry") + assert.Equal(t, "bob", got[0].Login) + assert.Equal(t, []graphql.DigestTeamRef{core}, got[0].Teams, + "team origin must survive deduplication so the SLA backfill can use the team request as a fallback") + }) + + t.Run("user requested via two teams accumulates both teams", func(t *testing.T) { + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + platform := graphql.DigestTeamRef{Org: "mattermost", Slug: "platform"} + pr := graphql.DigestPR{ + RequestedTeams: []graphql.DigestTeamRef{core, platform}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"bob"} } + got := gatherReviewersForPR(pr, resolver) + require.Len(t, got, 1) + assert.ElementsMatch(t, []graphql.DigestTeamRef{core, platform}, got[0].Teams) + }) + + t.Run("does not mutate the underlying RequestedUsers slice", func(t *testing.T) { + original := []string{"alice"} + pr := graphql.DigestPR{ + RequestedUsers: original, + RequestedTeams: []graphql.DigestTeamRef{{Org: "mattermost", Slug: "core"}}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"bob"} } + _ = gatherReviewersForPR(pr, resolver) + assert.Equal(t, []string{"alice"}, original, "team expansion must not append into the caller's slice") + }) + + t.Run("returns empty slice when no reviewers are requested", func(t *testing.T) { + got := gatherReviewersForPR(graphql.DigestPR{}, func(graphql.DigestTeamRef) []string { return nil }) + assert.Empty(t, got) + }) + + t.Run("empty logins are dropped from both direct and 
team-expanded paths", func(t *testing.T) { + // Locks in the invariant that no reviewerRequest with Login=="" can reach the + // caller, regardless of which code path a bad value comes in on. The GraphQL + // layer already filters empty user logins, but resolveTeam is a callback whose + // implementation we don't control, so the protection has to apply uniformly. + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + pr := graphql.DigestPR{ + RequestedUsers: []string{"", "alice", ""}, + RequestedTeams: []graphql.DigestTeamRef{core}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"", "bob", ""} } + got := gatherReviewersForPR(pr, resolver) + assert.Equal(t, []string{"alice", "bob"}, logins(got), + "empty logins must be dropped from both pr.RequestedUsers and team-expanded results") + for _, r := range got { + assert.NotEmpty(t, r.Login, "no reviewerRequest may reach the caller with an empty login") + } + }) + + t.Run("dedupe is case-insensitive on login", func(t *testing.T) { + core := graphql.DigestTeamRef{Org: "mattermost", Slug: "core"} + pr := graphql.DigestPR{ + RequestedUsers: []string{"Bob"}, + RequestedTeams: []graphql.DigestTeamRef{core}, + } + resolver := func(graphql.DigestTeamRef) []string { return []string{"bob"} } + got := gatherReviewersForPR(pr, resolver) + require.Len(t, got, 1) + assert.Equal(t, "Bob", got[0].Login, "first-seen casing wins") + }) +} + +func TestPickServiceGitHubUser_DeterministicOrdering(t *testing.T) { + // Returns the lexicographically-first connected user even when KV iteration order is + // reversed, so digest output is stable across runs and cluster nodes. 
+ p, mockKvStore, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + mockKvStore.EXPECT().ListKeys(0, keysPerPage, gomock.Any()).Return( + []string{ + "user-z" + githubTokenKey, + "user-a" + githubTokenKey, + "user-m" + githubTokenKey, + }, + nil, + ) + + encryptedToken, err := encrypt([]byte("dummyEncryptKey1"), MockAccessToken) + require.NoError(t, err) + storedInfo := GitHubUserInfo{ + UserID: "user-a", + GitHubUsername: "user-a-gh", + Token: &oauth2.Token{AccessToken: encryptedToken}, + Settings: &UserSettings{}, + } + infoBytes, err := json.Marshal(&storedInfo) + require.NoError(t, err) + + mockKvStore.EXPECT().Get("user-a"+githubTokenKey, gomock.Any()).DoAndReturn( + func(_ string, out any) error { + return json.Unmarshal(infoBytes, out) + }, + ) + + picked := p.pickServiceGitHubUser(context.Background()) + require.NotNil(t, picked, "expected a connected user to be picked") + assert.Equal(t, "user-a", picked.UserID, "expected lexicographically-first user") +} + +func TestPickServiceGitHubUser_SkipsUsersWithBrokenTokens(t *testing.T) { + // A user whose KV record can't be loaded should be skipped, not abort the digest. We + // expect the iterator to fall through to the next user. 
+ p, mockKvStore, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + mockKvStore.EXPECT().ListKeys(0, keysPerPage, gomock.Any()).Return( + []string{"user-a" + githubTokenKey, "user-b" + githubTokenKey}, + nil, + ) + + mockKvStore.EXPECT().Get("user-a"+githubTokenKey, gomock.Any()).Return(errors.New("kv read failed")) + + encryptedToken, err := encrypt([]byte("dummyEncryptKey1"), MockAccessToken) + require.NoError(t, err) + storedInfo := GitHubUserInfo{ + UserID: "user-b", + GitHubUsername: "user-b-gh", + Token: &oauth2.Token{AccessToken: encryptedToken}, + Settings: &UserSettings{}, + } + infoBytes, err := json.Marshal(&storedInfo) + require.NoError(t, err) + mockKvStore.EXPECT().Get("user-b"+githubTokenKey, gomock.Any()).DoAndReturn( + func(_ string, out any) error { + return json.Unmarshal(infoBytes, out) + }, + ) + + picked := p.pickServiceGitHubUser(context.Background()) + require.NotNil(t, picked) + assert.Equal(t, "user-b", picked.UserID) +} + +func TestPickServiceGitHubUser_NoConnectedUsers(t *testing.T) { + p, mockKvStore, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + mockKvStore.EXPECT().ListKeys(0, keysPerPage, gomock.Any()).Return([]string{}, nil) + + assert.Nil(t, p.pickServiceGitHubUser(context.Background())) +} + +func TestNewDigestSLAStartResolver_LogsAccurateCounters(t *testing.T) { + // The summary log must (a) not fire until summarize() is called and (b) reflect the + // resolver's actual usage when it does. Locks in the fix for a prior bug where the log + // was deferred inside the constructor and so always reported zeroes. 
+ p, mockKvStore, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + cachedTime := time.Date(2026, 4, 1, 12, 0, 0, 0, time.UTC) + mockKvStore.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ string, out any) error { + b, ok := out.(*[]byte) + require.True(t, ok, "getReviewSLAStartTime should pass *[]byte") + *b = []byte(cachedTime.Format(time.RFC3339Nano)) + return nil + }, + ).AnyTimes() + + api, ok := p.API.(*plugintest.API) + require.True(t, ok, "expected plugintest.API") + var ( + logCalls int + loggedHits int + loggedFetches int + ) + api.On("LogInfo", + "SLA digest backfill summary", + "timeline_pages_fetched", mock.AnythingOfType("int"), + "kv_hits", mock.AnythingOfType("int"), + ).Run(func(args mock.Arguments) { + logCalls++ + loggedFetches = args.Get(2).(int) + loggedHits = args.Get(4).(int) + }).Maybe() + + resolve, summarize := p.newDigestSLAStartResolver(context.Background(), nil) + require.Equal(t, 0, logCalls, "summary must not fire at construction time") + + got := resolve(prRef{ + Owner: "owner", + Repo: "repo", + Number: 1, + CreatedAt: github.Timestamp{Time: time.Now().UTC()}, + }, reviewerRequest{Login: "alice"}) + require.Equal(t, 0, logCalls, "summary must not fire on resolver invocation") + require.True(t, got.UTC().Equal(cachedTime), + "resolver should return the cached SLA start time; got %v want %v", got.UTC(), cachedTime) + + summarize() + assert.Equal(t, 1, logCalls, "summary should fire exactly once when summarize() is called") + assert.Equal(t, 1, loggedHits, "kv_hits should reflect the resolver's actual usage after one cached lookup") + assert.Equal(t, 0, loggedFetches, "no timeline pages fetched on the cached path") +} + +func TestFetchAllOrgOpenPRs_CanceledContextReturnsNotOK(t *testing.T) { + // A canceled context mid-iteration must NOT report anyOK=true to the caller, even if + // earlier orgs in the list had already returned successfully (here we cancel before any + // org runs, which is the strict 
version: anyOK starts false and must stay false). The + // caller uses this to decide whether to advance slaDigestDayKVKey — an interrupted scan + // should retry on the next scheduler tick, not silently skip until tomorrow. + p, _, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + // graphQLClient is intentionally nil: the ctx.Err() check fires before any GraphQL call, + // so a nil client must never be dereferenced on the cancellation path. Locks in the + // fast-fail-without-network behavior alongside the failure-flag contract. + prs, ok := p.fetchAllOrgOpenPRs(ctx, nil, []string{"mattermost"}) + assert.Nil(t, prs, "canceled context should not surface partial results") + assert.False(t, ok, "canceled context must not be reported as a successful scan") +} + +func TestNewDigestSLAStartResolver_SkipsSummaryLogWhenNoLookups(t *testing.T) { + // summarize() is wired up via defer in collectAllOverdueSLAItems, so it can fire even + // when the resolver was never invoked (e.g. PRs with no requested reviewers, or + // degenerate prRefs that take the early return inside resolve). When no lookups ran, + // the "0,0" summary line is misleading and is intentionally suppressed. 
+ p, _, ctrl := setupServiceUserPickTest(t) + defer ctrl.Finish() + + api, ok := p.API.(*plugintest.API) + require.True(t, ok, "expected plugintest.API") + var logCalls int + api.On("LogInfo", + "SLA digest backfill summary", + "timeline_pages_fetched", mock.AnythingOfType("int"), + "kv_hits", mock.AnythingOfType("int"), + ).Run(func(mock.Arguments) { logCalls++ }).Maybe() + + _, summarize := p.newDigestSLAStartResolver(context.Background(), nil) + summarize() + assert.Equal(t, 0, logCalls, "summary must be suppressed when no lookups ran") +} + +// setupServiceUserPickTest wires up a Plugin instance with a mocked KvStore and a mock +// plugintest.API that swallows the warn-level logs pickServiceGitHubUser emits on failure +// paths. Returns the plugin, the KV mock for setting expectations, and the gomock controller +// for the caller to Finish(). +func setupServiceUserPickTest(t *testing.T) (*Plugin, *mocks.MockKvStore, *gomock.Controller) { + t.Helper() + ctrl := gomock.NewController(t) + mockKvStore := mocks.NewMockKvStore(ctrl) + + api := &plugintest.API{} + api.On("LogWarn", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe() + api.On("LogDebug", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe() + + p := NewPlugin() + p.store = mockKvStore + p.SetAPI(api) + p.client = pluginapi.NewClient(api, p.Driver) + p.setConfiguration(&Configuration{EncryptionKey: "dummyEncryptKey1"}) + + return p, mockKvStore, ctrl +} diff --git a/server/plugin/utils.go b/server/plugin/utils.go index d7adc8e72..4f590aa6f 100644 --- a/server/plugin/utils.go +++ b/server/plugin/utils.go @@ -29,8 +29,12 @@ func getMentionSearchQuery(username string, orgs []string) string { return buildSearchQuery("is:open mentions:%v archived:false %v", username, orgs) } +// getReviewSearchQuery returns the GitHub search query for a user's pending review requests. 
+// Drafts are excluded from the search results, but the SLA clock still runs through any draft +// period: review_requested webhooks are recorded regardless of draft state, so a PR that was +// review-requested while draft and later un-drafted retains its original SLA start time. func getReviewSearchQuery(username string, orgs []string) string { - return buildSearchQuery("is:pr is:open review-requested:%v archived:false %v", username, orgs) + return buildSearchQuery("is:pr is:open draft:false review-requested:%v archived:false %v", username, orgs) } func getYourPrsSearchQuery(username string, orgs []string) string { @@ -378,8 +382,13 @@ func slaCalendarDiffDays(createdAt github.Timestamp, targetDays int, now time.Ti return int(dueDay.Sub(todayDay) / (24 * time.Hour)) } -// formatChannelOverdueReviewLine formats a single line for the overdue-SLA channel digest. -func formatChannelOverdueReviewLine(githubLogin, title, htmlURL, baseURL string) string { +// formatChannelOverduePRBody formats the per-PR portion of an overdue-SLA digest line: +// "<owner>/<repo> - [<title>](<url>)". The reviewer column and list-bullet prefix are +// composed by the caller so the digest can group several of a reviewer's overdue PRs under +// a single reviewer header without duplicating their @-mention on every row. When owner/repo +// cannot be parsed from htmlURL the raw URL is used as the repo display, so nothing renders +// as a bare " / ". Long titles are truncated to 200 chars + "...". +func formatChannelOverduePRBody(title, htmlURL, baseURL string) string { owner, repo := parseOwnerAndRepo(htmlURL, baseURL) repoDisplay := fmt.Sprintf("%s/%s", owner, repo) if owner == "" || repo == "" { @@ -388,7 +397,22 @@ func formatChannelOverdueReviewLine(githubLogin, title, htmlURL, baseURL string) if len(title) > 200 { title = strings.TrimSpace(title[:200]) + "..." 
} - return fmt.Sprintf("- %s - %s : %s", githubLogin, repoDisplay, title) + titleDisplay := title + if htmlURL != "" { + titleDisplay = fmt.Sprintf("[%s](%s)", escapeMarkdownLinkText(title), htmlURL) + } + return fmt.Sprintf("%s - %s", repoDisplay, titleDisplay) +} + +// escapeMarkdownLinkText escapes characters that would break a markdown link's display text. +// We only escape `]` and `\` so that titles containing brackets (common in JIRA-prefixed PRs) +// do not terminate the link early. +func escapeMarkdownLinkText(s string) string { + if s == "" { + return s + } + r := strings.NewReplacer(`\`, `\\`, `]`, `\]`) + return r.Replace(s) } // reviewSLAMarkdown returns a Markdown SLA suffix for Mattermost posts and whether the review is overdue. diff --git a/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx b/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx index 14c24272f..7ae218778 100644 --- a/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx +++ b/webapp/src/components/sidebar_buttons/sidebar_buttons.jsx @@ -7,6 +7,7 @@ import PropTypes from 'prop-types'; import {makeStyleFromTheme, changeOpacity} from 'mattermost-redux/utils/theme_utils'; import {RHSStates} from '../../constants'; +import {reviewsHaveOverdue} from '../../utils/sla'; export default class SidebarButtons extends React.PureComponent { static propTypes = { @@ -211,45 +212,18 @@ export default class SidebarButtons extends React.PureComponent { } } -function reviewHasOverdue(reviews, targetDays) { - if (!targetDays || !reviews || reviews.length === 0) { - return false; - } - for (const pr of reviews) { - const startIso = pr.review_sla_start || pr.created_at; - if (!startIso) { - continue; - } - const created = new Date(startIso); - if (Number.isNaN(created.getTime())) { - continue; - } - const due = new Date(Date.UTC( - created.getUTCFullYear(), - created.getUTCMonth(), - created.getUTCDate(), - )); - due.setUTCDate(due.getUTCDate() + targetDays); - const today = new Date(); - 
const todayUTC = Date.UTC(today.getUTCFullYear(), today.getUTCMonth(), today.getUTCDate()); - const dueUTC = Date.UTC(due.getUTCFullYear(), due.getUTCMonth(), due.getUTCDate()); - const diffDays = Math.round((dueUTC - todayUTC) / (24 * 60 * 60 * 1000)); - if (diffDays < 0) { - return true; - } - } - return false; -} - function reviewButtonStyle(base, reviews, targetDays) { - if (!targetDays) { + // Match getReviewSLAStatus / reviewsHaveOverdue: a non-positive target means SLA + // is not configured. !targetDays alone would let a negative value through and + // produce a misleading green indicator. + if (!targetDays || targetDays <= 0) { return base; } const list = reviews || []; if (list.length === 0) { return base; } - if (reviewHasOverdue(list, targetDays)) { + if (reviewsHaveOverdue(list, targetDays)) { return {...base, color: 'var(--dnd-indicator)'}; } return {...base, color: 'var(--online-indicator)'}; diff --git a/webapp/src/components/sidebar_right/github_items.tsx b/webapp/src/components/sidebar_right/github_items.tsx index e98d2bb88..453f1e20a 100644 --- a/webapp/src/components/sidebar_right/github_items.tsx +++ b/webapp/src/components/sidebar_right/github_items.tsx @@ -12,6 +12,7 @@ import {GitPullRequestIcon, IssueOpenedIcon, IconProps, CalendarIcon, PersonIcon import {GithubItemsProps, GithubLabel, GithubItem, Review} from '../../types/github_types'; import {formatTimeSince} from '../../utils/date_utils'; +import {getReviewSLAStatus} from '../../utils/sla'; import CrossIcon from '../../images/icons/cross'; import DotIcon from '../../images/icons/dot'; @@ -37,6 +38,7 @@ const notificationReasons = { function GithubItems(props: GithubItemsProps) { const style = getStyle(props.theme); + const showReviewSLA = Boolean(props.showReviewSLA && props.reviewTargetDays && props.reviewTargetDays > 0); return props.items.length > 0 ? 
props.items.map((item) => { let repoName = ''; @@ -137,6 +139,11 @@ function GithubItems(props: GithubItemsProps) { const reviews = getReviewText(item, style, (item.created_at != null || userName != null || milestone != null)); + let slaBadge: JSX.Element | null = null; + if (showReviewSLA) { + slaBadge = renderReviewSLABadge(item, props.reviewTargetDays || 0, style); + } + // Status images pasted directly from GitHub. Change to our own version when styles are decided. let status: JSX.Element | null = null; if (item.status) { @@ -304,6 +311,7 @@ function GithubItems(props: GithubItemsProps) { {notificationReasons[item.reason as keyof typeof notificationReasons]} </>) : null } </div> + {slaBadge} {reviews} {pullRequestDetails} </div> @@ -375,9 +383,61 @@ const getStyle = makeStyleFromTheme((theme) => { prOpenSince: { margin: '5px 0 0 0', }, + slaBadgeRow: { + margin: '6px 0 0 0', + }, + slaBadgeBase: { + display: 'inline-block', + padding: '2px 8px', + borderRadius: '4px', + fontSize: '12px', + fontWeight: 600, + lineHeight: 1.4, + }, + slaBadgeOverdue: { + backgroundColor: changeOpacity(theme.dndIndicator, 0.15), + color: theme.dndIndicator, + }, + slaBadgeDueToday: { + backgroundColor: changeOpacity(theme.awayIndicator, 0.2), + color: theme.awayIndicator, + }, + slaBadgeDueLater: { + backgroundColor: changeOpacity(theme.centerChannelColor, 0.08), + color: changeOpacity(theme.centerChannelColor, 0.75), + }, }; }); +function renderReviewSLABadge(item: GithubItem, targetDays: number, style: any): JSX.Element | null { + const status = getReviewSLAStatus(item, targetDays); + if (!status) { + return null; + } + + let label: string; + let badgeStyle: CSS.Properties; + if (status.daysFromDue < 0) { + const overdueDays = -status.daysFromDue; + label = `Overdue by ${overdueDays} ${overdueDays === 1 ? 
'day' : 'days'}`; + badgeStyle = style.slaBadgeOverdue; + } else if (status.daysFromDue === 0) { + label = 'Due today'; + badgeStyle = style.slaBadgeDueToday; + } else { + label = `Due in ${status.daysFromDue} ${status.daysFromDue === 1 ? 'day' : 'days'}`; + badgeStyle = style.slaBadgeDueLater; + } + + return ( + <div style={style.slaBadgeRow}> + <span style={{...style.slaBadgeBase, ...badgeStyle}}> + {label} + </span> + </div> + ); +} + function getGithubLabels(labels: GithubLabel[]) { return labels.map((label) => { return ( diff --git a/webapp/src/components/sidebar_right/index.jsx b/webapp/src/components/sidebar_right/index.jsx index 25717d359..3e62e1c69 100644 --- a/webapp/src/components/sidebar_right/index.jsx +++ b/webapp/src/components/sidebar_right/index.jsx @@ -11,7 +11,7 @@ import {getSidebarData} from 'src/selectors'; import SidebarRight from './sidebar_right.jsx'; function mapStateToProps(state) { - const {username, reviews, yourPrs, yourAssignments, unreads, enterpriseURL, orgs, rhsState} = getSidebarData(state); + const {username, reviews, yourPrs, yourAssignments, unreads, enterpriseURL, orgs, rhsState, reviewTargetDays} = getSidebarData(state); return { username, reviews, @@ -21,6 +21,7 @@ function mapStateToProps(state) { enterpriseURL, orgs, rhsState, + reviewTargetDays, }; } diff --git a/webapp/src/components/sidebar_right/sidebar_right.jsx b/webapp/src/components/sidebar_right/sidebar_right.jsx index a50f4a00d..567527929 100644 --- a/webapp/src/components/sidebar_right/sidebar_right.jsx +++ b/webapp/src/components/sidebar_right/sidebar_right.jsx @@ -73,6 +73,7 @@ export default class SidebarRight extends React.PureComponent { yourPrs: PropTypes.arrayOf(PropTypes.object), yourAssignments: PropTypes.arrayOf(PropTypes.object), rhsState: PropTypes.string, + reviewTargetDays: PropTypes.number, theme: PropTypes.object.isRequired, actions: PropTypes.shape({ getYourPrsDetails: PropTypes.func.isRequired, @@ -167,6 +168,8 @@ export default class 
SidebarRight extends React.PureComponent { <GithubItems items={githubItems} theme={this.props.theme} + showReviewSLA={rhsState === RHSStates.REVIEWS} + reviewTargetDays={this.props.reviewTargetDays || 0} /> </div> </Scrollbars> diff --git a/webapp/src/reducers/index.ts b/webapp/src/reducers/index.ts index f253aceb4..6cb4adb22 100644 --- a/webapp/src/reducers/index.ts +++ b/webapp/src/reducers/index.ts @@ -63,15 +63,15 @@ function userSettings(state = { } } -function configuration(state = { +function configuration(state: ConfigurationData = { left_sidebar_enabled: true, review_target_days: 0, -}, action: {type: string, data: ConnectedData | ConfigurationData}) { +}, action: {type: string, data: ConnectedData | ConfigurationData}): ConfigurationData { switch (action.type) { case ActionTypes.RECEIVED_CONNECTED: return { ...state, - ...(action.data as ConnectedData).configuration, + ...((action.data as ConnectedData).configuration as Partial<ConfigurationData>), }; case ActionTypes.RECEIVED_CONFIGURATION: return action.data as ConfigurationData; diff --git a/webapp/src/selectors.ts b/webapp/src/selectors.ts index defe70466..2ecd0d3c6 100644 --- a/webapp/src/selectors.ts +++ b/webapp/src/selectors.ts @@ -54,7 +54,7 @@ function mapPrsToDetails(prs: GithubIssueData[], details: PrsDetailsData[]) { export const getSidebarData = createSelector( getPluginState, (pluginState): SidebarData => { - const {username, sidebarContent, reviewDetails, yourPrDetails, organizations, rhsState} = pluginState; + const {username, sidebarContent, reviewDetails, yourPrDetails, organizations, rhsState, configuration: pluginConfig} = pluginState; return { username, reviews: mapPrsToDetails(sidebarContent.reviews || emptyArray, reviewDetails), @@ -63,6 +63,7 @@ export const getSidebarData = createSelector( unreads: sidebarContent.unreads || emptyArray, orgs: organizations, rhsState, + reviewTargetDays: pluginConfig.review_target_days || 0, }; }, ); diff --git a/webapp/src/types/github_types.ts 
b/webapp/src/types/github_types.ts index e5b90415d..a1800e264 100644 --- a/webapp/src/types/github_types.ts +++ b/webapp/src/types/github_types.ts @@ -56,6 +56,12 @@ export type GithubItem = PrsDetailsData & { export type GithubItemsProps = { items: GithubItem[]; theme: Theme; + + /** When true, render an SLA-status badge (Overdue / Due today / Due in N days) next to each item. */ + showReviewSLA?: boolean; + + /** SLA target in days, used to compute the badge. Falsy disables the badge regardless of showReviewSLA. */ + reviewTargetDays?: number; } export type UserSettingsData = { @@ -171,7 +177,8 @@ export type SidebarData = { yourAssignments: GithubIssueData[], unreads: UnreadsData[] orgs: string[], - rhsState?: string | null + rhsState?: string | null, + reviewTargetDays: number, } export type Organization = { diff --git a/webapp/src/utils/sla.ts b/webapp/src/utils/sla.ts new file mode 100644 index 000000000..0e0fe322b --- /dev/null +++ b/webapp/src/utils/sla.ts @@ -0,0 +1,84 @@ +// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +const MS_PER_DAY = 24 * 60 * 60 * 1000; + +// daysFromDue is negative when overdue, 0 when due today, positive when in the future. +export type ReviewSLAStatus = { + daysFromDue: number; + overdue: boolean; +}; + +/** + * Returns the ISO timestamp the review SLA clock should start from. Prefers + * review_sla_start (the most recent review request the plugin recorded) and + * falls back to created_at. Returns null when neither is set. + */ +export function getReviewSLAStartIso(item: {review_sla_start?: string | null; created_at?: string | null}): string | null { + if (item.review_sla_start) { + return item.review_sla_start; + } + if (item.created_at) { + return item.created_at; + } + return null; +} + +/** + * Computes the SLA status for a review item, or null when no useful answer is + * possible (no target configured, no start date, unparsable date). 
The "days" + * are calendar days computed against today's UTC date, matching the server's + * digest math. + */ +export function getReviewSLAStatus( + item: {review_sla_start?: string | null; created_at?: string | null}, + targetDays: number, +): ReviewSLAStatus | null { + if (!targetDays || targetDays <= 0) { + return null; + } + + const startIso = getReviewSLAStartIso(item); + if (!startIso) { + return null; + } + + const start = new Date(startIso); + if (Number.isNaN(start.getTime())) { + return null; + } + + const dueUTC = Date.UTC( + start.getUTCFullYear(), + start.getUTCMonth(), + start.getUTCDate() + targetDays, + ); + const today = new Date(); + const todayUTC = Date.UTC(today.getUTCFullYear(), today.getUTCMonth(), today.getUTCDate()); + const daysFromDue = Math.round((dueUTC - todayUTC) / MS_PER_DAY); + + return { + daysFromDue, + overdue: daysFromDue < 0, + }; +} + +/** + * True when at least one review in the list is overdue against the target. + * Used to drive the red "needs review" indicator on the sidebar button. + */ +export function reviewsHaveOverdue( + reviews: Array<{review_sla_start?: string | null; created_at?: string | null}> | null | undefined, + targetDays: number, +): boolean { + if (!targetDays || !reviews || reviews.length === 0) { + return false; + } + for (const pr of reviews) { + const status = getReviewSLAStatus(pr, targetDays); + if (status && status.overdue) { + return true; + } + } + return false; +}