diff --git a/.claude/settings.json b/.claude/settings.json index ebdc6b2..4ea3d06 100644 --- a/.claude/settings.json +++ b/.claude/settings.json @@ -6,7 +6,7 @@ "hooks": [ { "type": "command", - "command": "printf '\\n๐Ÿ“ฆ Agent Friendly Code โ€” current release: 0.3.0\\n โ€ข Read AGENTS.md for conventions, CONTRIBUTING.md for the PR workflow.\\n โ€ข Roadmap: 0.4.0 (quick wins โ€” history-aware signals + PR score-diff + Claude Code skill) โ†’ 0.5.0 (auto-refresh + smarter matching) โ†’ 0.6.0 (maintainer ownership + at-scale discovery) โ†’ 1.0.0 (production cut โ€” Postgres + at-scale indexing + benchmark harness).\\n โ€ข Changelog rule: user-facing capabilities only. Codebase hygiene (CI / linter / tests / CONTRIBUTING) does NOT go in lib/changelog.ts.\\n'" + "command": "printf '\\n๐Ÿ“ฆ Agent Friendly Code โ€” current release: 0.4.0\\n โ€ข Read AGENTS.md for conventions, CONTRIBUTING.md for the PR workflow.\\n โ€ข Roadmap: 0.5.0 (quick wins โ€” history-aware signals + PR score-diff + Claude Code skill) โ†’ 0.6.0 (auto-refresh + smarter matching โ€” webhook rescoring + alternatives v2) โ†’ 0.7.0 (maintainer ownership + at-scale discovery โ€” OAuth opt-out + package overlay at scale) โ†’ 1.0.0 (production cut โ€” Postgres + at-scale indexing + benchmark harness).\\n โ€ข Changelog rule: user-facing capabilities only. Codebase hygiene (CI / linter / tests / CONTRIBUTING) does NOT go in lib/changelog.ts.\\n'" } ] } diff --git a/.claude/skills/code-review/SKILL.md b/.claude/skills/code-review/SKILL.md index 81cd9eb..c400ac6 100644 --- a/.claude/skills/code-review/SKILL.md +++ b/.claude/skills/code-review/SKILL.md @@ -54,7 +54,7 @@ Only `@phosphor-icons/react`. Block Lucide, Heroicons, React Icons, inline SVG, ## Security - Parameterised SQL only. 
-- `dangerouslySetInnerHTML` is allowed only for the existing server-built JSON-LD scripts (`app/layout.tsx`, `app/page.tsx`, `app/repo/[id]/page.tsx`, `app/package/[registry]/[name]/page.tsx`) with the `<` โ†’ `<` escape preserved. Reject any new use. +- `dangerouslySetInnerHTML` is allowed only for the existing server-built JSON-LD scripts (`app/layout.tsx`, `app/page.tsx`, `app/methodology/page.tsx`, `app/repo/[id]/page.tsx`, `app/package/[registry]/[name]/page.tsx`) with the `<` โ†’ `<` escape preserved. Reject any new use. - External links include `rel="noopener noreferrer"`. - Never execute code from a cloned repo. diff --git a/.claude/skills/quality-check/SKILL.md b/.claude/skills/quality-check/SKILL.md index 2b795c9..b4999ff 100644 --- a/.claude/skills/quality-check/SKILL.md +++ b/.claude/skills/quality-check/SKILL.md @@ -39,7 +39,7 @@ Run the four checks below on any diff affecting UI or I/O. Report findings group ## Security - **SQL parameterisation**: every query uses `?` placeholders. No string concatenation. -- **`dangerouslySetInnerHTML`** is allowed only for server-built JSON-LD (`app/layout.tsx`, `app/page.tsx`, `app/repo/[id]/page.tsx`, `app/package/[registry]/[name]/page.tsx`) and must keep the `<` โ†’ `<` escape. Any other use must be rejected. +- **`dangerouslySetInnerHTML`** is allowed only for server-built JSON-LD (`app/layout.tsx`, `app/page.tsx`, `app/methodology/page.tsx`, `app/repo/[id]/page.tsx`, `app/package/[registry]/[name]/page.tsx`) and must keep the `<` โ†’ `<` escape. Any other use must be rejected. - **External URLs** in `` always include `rel="noopener noreferrer"`. - **User input at every boundary** is validated: `parseRepoUrl` for repo URLs, `Number.isFinite` for numeric params, length caps on search strings. - **Clone safety**: `git clone --depth 1 --single-branch`; never execute code from a clone (no `bun install`, no `npm install`, no post-clone scripts). 
diff --git a/AGENTS.md b/AGENTS.md index b80f45d..9abfb4c 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -43,14 +43,17 @@ app/ page.tsx # leaderboard repo/[id]/page.tsx # repo detail with per-model suggestions (includes generateMetadata) methodology/page.tsx # how the static scoring works + about/page.tsx # who built this and why (footer-linked, E-E-A-T) roadmap/page.tsx # upcoming versions (from lib/roadmap.ts) changelog/page.tsx # what's in this build (from lib/changelog.ts) - robots.ts # /robots.txt โ€” allows "/", disallows "/api/" - sitemap.ts # /sitemap.xml โ€” static routes + every repo detail page + robots.ts # /robots.txt โ€” wildcard + explicit AI-crawler allows + sitemap.ts # /sitemap.xml โ€” static routes + every repo detail page (priority scaled by score) + llms.txt/route.ts # /llms.txt โ€” markdown manifest for LLM crawlers (Perplexity, Claude, ChatGPT search) api/repos/route.ts api/repo/[id]/route.ts api/badge/[host]/[owner]/[name]/route.ts # SVG badge for README embeds (?model= for per-model) api/package/[registry]/[name]/route.ts # npm/PyPI/Cargo lookup โ†’ source-repo score + repo/[id]/opengraph-image.tsx # next/og convention โ€” per-repo OG image (auto-wired) package/page.tsx # explainer + try-it examples package/[registry]/[name]/page.tsx # scored | not_scored | unresolved states globals.css # Tailwind import + @theme tokens (no custom utilities) @@ -81,7 +84,7 @@ lib/ changelog.ts # typed ChangelogEntry[] roadmap.ts # typed RoadmapVersion[] scripts/ - init-db.ts, score.ts, seed.ts, seed-list.ts + init-db.ts, score.ts, seed.ts, seed-list.ts, seed-packages.ts (auto-runs after seed.ts) tests/ _helpers.ts # makeFixture / removeFixture build synthetic trees under os.tmpdir() format.test.ts # compactStars, relativeTime, hostLabel @@ -93,9 +96,10 @@ tasks/ 0.1.0/ # released โ€” shipped record 0.2.0/ # released โ€” dogfood complete (tests, self-score, row-click) 0.3.0/ # released โ€” embeddable scores + broader coverage (badge, more agents, alternatives, 
package lookup) - 0.4.0/ # planned โ€” quick wins (history-aware signals + PR score-diff action + Claude Code skill) - 0.5.0/ # planned โ€” auto-refresh + smarter matching (webhook rescoring + alternatives v2) - 0.6.0/ # planned โ€” maintainer ownership + at-scale discovery (OAuth opt-out + package overlay at scale) + 0.4.0/ # released โ€” credible scores + discoverability (docs-cited rationales + agent-specific signals + About/llms.txt/OG) + 0.5.0/ # planned โ€” quick wins (history-aware signals + PR score-diff action + Claude Code skill) + 0.6.0/ # planned โ€” auto-refresh + smarter matching (webhook rescoring + alternatives v2) + 0.7.0/ # planned โ€” maintainer ownership + at-scale discovery (OAuth opt-out + package overlay at scale) 1.0.0/ # planned โ€” production cut (Postgres + at-scale indexing + benchmark harness) .claude/ settings.json # SessionStart + Stop hooks (Stop โ†’ hooks/stop-guard.sh) @@ -176,9 +180,9 @@ Hooks docs: . - We `git clone --depth 1 --single-branch` arbitrary URLs โ€” safe by default. We never run post-clone scripts, never `npm install`, never execute code from the clone. - SQL: all queries parameterised. No interpolation. -- HTML: React auto-escapes. The only `dangerouslySetInnerHTML` is server-built JSON-LD with `<` escaped to `<` (`app/layout.tsx`, `app/page.tsx`, `app/repo/[id]/page.tsx`, `app/package/[registry]/[name]/page.tsx`); never feed user-controlled strings into it. +- HTML: React auto-escapes. The only `dangerouslySetInnerHTML` is server-built JSON-LD with `<` escaped to `<` (`app/layout.tsx`, `app/page.tsx`, `app/methodology/page.tsx`, `app/repo/[id]/page.tsx`, `app/package/[registry]/[name]/page.tsx`); never feed user-controlled strings into it. - Local-path mode reads files; never writes outside `data/` and the clone workspace passed to `shallowClone`. -- No auth yet (read-only dashboard). When auth lands (`tasks/0.6.0/01-opt-out-claim-flow.md`), do it via OAuth and gate DB writes per user. 
+- No auth yet (read-only dashboard). When auth lands (`tasks/0.7.0/01-opt-out-claim-flow.md`), do it via OAuth and gate DB writes per user. **Operational concerns** (not code-level security) worth flagging before public launch: @@ -188,7 +192,7 @@ Hooks docs: . ## Things to leave alone -- Per-model weights are illustrative. Don't tune without `tasks/1.0.0/03-benchmark-harness.md`. +- Per-model rationales are derived from each agent's published documentation (see `MODELS[].sources` in `lib/scoring/weights.ts`); the weights themselves are still pre-benchmark. Do not tune individual values without re-running the docs audit (see `tasks/0.4.0/01-sourced-agent-rationales.md`) or shipping the v1.0.0 benchmark harness. - SQLite schema is intentionally simple. Flag before restructuring. - The I/O boundary. Scoring stays pure; DB stays in `lib/db.ts`. - `APP_VERSION` โ€” don't bump without a release. diff --git a/README.md b/README.md index c10fa5f..aa78440 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Agent Friendly Code -[![Release](https://img.shields.io/badge/release-0.3.0-blue?style=flat-square)](./lib/changelog.ts) +[![Release](https://img.shields.io/badge/release-0.4.0-blue?style=flat-square)](./lib/changelog.ts) [![License: MIT](https://img.shields.io/badge/license-MIT-green?style=flat-square)](./LICENSE) [![Next.js 16](https://img.shields.io/badge/Next.js-16-black?style=flat-square)](https://nextjs.org) [![Node โ‰ฅ20.9](https://img.shields.io/badge/node-%E2%89%A520.9-43853d?style=flat-square&logo=node.js&logoColor=white)](https://nodejs.org) @@ -9,7 +9,7 @@ **A public dashboard that ranks open-source repos by how friendly they are for AI coding agents โ€” per model.** -Next.js 16 + SQLite (`better-sqlite3`), styled with Tailwind CSS 4. Spans GitHub, GitLab, and Bitbucket out of the box. Current release: **0.3.0**. +Next.js 16 + SQLite (`better-sqlite3`), styled with Tailwind CSS 4. Spans GitHub, GitLab, and Bitbucket out of the box. 
Current release: **0.4.0**. ![Agent Friendly Code โ€” leaderboard](./public/demo/light.png) @@ -60,9 +60,9 @@ Two audiences: Not pretending the idea is free of risk: -- **Per-model scoring is the hardest part and the easiest to fake.** Today the weights are illustrative. Real "Claude ranks this higher than GPT-5" requires actually running each agent on each repo. That's `tasks/1.0.0/03-benchmark-harness.md`. +- **Per-model scoring is the hardest part and the easiest to fake.** Per-model rationales are now sourced from each agent's published docs (see `MODELS[].sources` in `lib/scoring/weights.ts`), but the weight values themselves are still pre-benchmark. Real "Claude ranks this higher than GPT-5" requires actually running each agent on each repo. That's `tasks/1.0.0/03-benchmark-harness.md`. - **Factory.ai is already in this space.** Differentiation has to stay sharp. -- **Public-shaming risk.** Ranking #47,823 without consent invites angry maintainers. Planned via `tasks/0.6.0/01-opt-out-claim-flow.md`. +- **Public-shaming risk.** Ranking #47,823 without consent invites angry maintainers. Planned via `tasks/0.7.0/01-opt-out-claim-flow.md`. - **Score gaming.** Once public, people add boilerplate `AGENTS.md` to pass the rubric without being useful. Dynamic (actually-run-an-agent) checks are the counter โ€” see benchmark harness. - **Freshness.** Scores decay with every push. Webhook-driven rescoring is roadmap. @@ -84,14 +84,14 @@ Short answer: **low risk**. The app: - Rate limiting the public API. - Sandbox the cloner in a container (future-proofing against hypothetical git CVEs). -Auth and per-maintainer controls land with the opt-out / claim flow in v0.6.0. +Auth and per-maintainer controls land with the opt-out / claim flow in v0.7.0. 
## Quickstart ```bash bun install bun run prepare-hooks # once โ€” installs lefthook pre-commit (Biome + tsc + test + file-length) -bun run seed # score the curated set across GH / GL / BB +bun run seed # score the curated set across GH / GL / BB + cache popular package aliases bun run dev # http://localhost:3000 ``` @@ -110,7 +110,7 @@ Run the unit tests with `bun run test` (uses `node --test` + `tsx`; requires Nod ## Versioning -`lib/version.ts` and `package.json` carry the current release number (currently **0.3.0**). Bumps happen only when we actually cut a release โ€” never when merging intermediate work. The version pill in the header surfaces the number directly; `/changelog` lists what each release shipped. +`lib/version.ts` and `package.json` carry the current release number (currently **0.4.0**). Bumps happen only when we actually cut a release โ€” never when merging intermediate work. The version pill in the header surfaces the number directly; `/changelog` lists what each release shipped. ## Stack & rationale @@ -172,9 +172,9 @@ See `/roadmap` in the running app or the per-version `tasks/` folders for the fu Versions are sequenced cheap-first so the highest-impact small additions don't get gated on heavy infra: -- **0.4.0 โ€” quick wins**: history-aware signals (maintenance recency, commit velocity, contributor activity) + a GitHub Action that comments the score delta on every PR + a Claude Code skill (with public `/api/score` lookup) that recommends a model for the active repo. No new infra. -- **0.5.0 โ€” auto-refresh + smarter matching**: webhook-driven rescoring (keep scores fresh on every push) + alternatives via README embeddings (cross-language matches the v0.3.0 SQL heuristic misses). -- **0.6.0 โ€” maintainer ownership + at-scale discovery**: OAuth opt-out / claim flow for maintainers + at-scale package overlay (per-registry leaderboards + userscript that renders the badge inline on npmjs.com / PyPI / crates.io). 
+- **0.5.0 โ€” quick wins**: history-aware signals (maintenance recency, commit velocity, contributor activity) + a GitHub Action that comments the score delta on every PR + a Claude Code skill (with public `/api/score` lookup) that recommends a model for the active repo. No new infra. +- **0.6.0 โ€” auto-refresh + smarter matching**: webhook-driven rescoring (keep scores fresh on every push) + alternatives via README embeddings (cross-language matches the v0.3.0 SQL heuristic misses). +- **0.7.0 โ€” maintainer ownership + at-scale discovery**: OAuth opt-out / claim flow for maintainers + at-scale package overlay (per-registry leaderboards + userscript that renders the badge inline on npmjs.com / PyPI / crates.io). - **1.0.0 โ€” production cut**: Postgres migration for concurrent writers + auto-discovered crawl (target 10k repos) + benchmark harness that derives per-model weights from measured agent success. From here on, breaking API changes require a MAJOR bump. ## Defensibility diff --git a/app/about/page.tsx b/app/about/page.tsx new file mode 100644 index 0000000..3b3c7d3 --- /dev/null +++ b/app/about/page.tsx @@ -0,0 +1,116 @@ +import type { Metadata } from "next"; +import Link from "next/link"; + +import { Panel, PanelHeading } from "@/components/Panel"; +import { APP_NAME, REPO_URL } from "@/lib/version"; + +export const metadata: Metadata = { + title: "About", + alternates: { canonical: "/about" }, + twitter: { title: `About โ€” ${APP_NAME}` }, + openGraph: { title: `About โ€” ${APP_NAME}`, url: "/about" }, + description: `Who built ${APP_NAME}, why it exists, and what it isn't. Independent, MIT-licensed, no affiliation with any AI agent vendor.`, +}; + +export default function AboutPage() { + return ( + <> +
+

About

+

+ Who built {APP_NAME}, why it exists, and what it deliberately isn't.

+
+ + + Who +

+ Built and maintained by{" "} + + Himanshu Singh + + . Independent project — no affiliation with Anthropic, OpenAI, Google, Cognition, Anysphere, or any of the + agent vendors ranked here.

+ + +
+ + Why this exists +

+ The gap between “repo with a README” and “repo that actually helps an AI coding agent ship + code” keeps widening, and there's no public way to tell who's doing the work. {APP_NAME}{" "} tries to make that visible — per model, because the agents aren't interchangeable. Claude Code wants an + AGENTS.md and a fast test loop; Cursor wants strong types and a skim-readable README; Devin wants a runnable + dev environment with declared deps and tests. The same repository can score very differently across them, + and a single overall number would hide that.

+
+
+ +
+ + What it isn't +

+ This is not a benchmark of agent performance. Today every score is derived from{" "} + static signals — file existence and content-length checks on the + cloned tree. No agent is actually run. Per-model rationales are derived from each agent's published + documentation (sources are linked on the methodology page), but the weight values themselves are still + pre-benchmark — not yet calibrated against measured agent success. Read the{" "} + + methodology + {" "} + for the full picture, including the production-cut plan to replace pre-benchmark weights with measured ones.

+
+
+ +
+ + Open source +

+ MIT-licensed. The signal definitions, weight profiles, scoring code, seed list, and every score in the + database are all in the{" "} + + source repository + + . If a repo's score looks wrong, file an issue with a link and the rubric to revisit; if a signal is + missing, propose one.

+
+
+ +
+ + Contact + +

+ Best signal: open an issue or discussion on{" "} + + GitHub + + . +

+
+
+ + ); +} diff --git a/app/layout.tsx b/app/layout.tsx index b6a36a7..2bff538 100644 --- a/app/layout.tsx +++ b/app/layout.tsx @@ -69,6 +69,19 @@ const JSON_LD = { "query-input": "required name=search_term_string", }, }, + { + "@type": "WebApplication", + "@id": `${APP_URL}/#app`, + url: APP_URL, + name: APP_NAME, + operatingSystem: "Any", + isAccessibleForFree: true, + description: APP_DESCRIPTION, + publisher: { "@id": `${APP_URL}/#org` }, + applicationCategory: "DeveloperApplication", + offers: { "@type": "Offer", price: "0", priceCurrency: "USD" }, + browserRequirements: "Requires JavaScript-enabled modern browser", + }, ], }; @@ -88,10 +101,11 @@ const NAV_LINKS = [ const FOOTER_LINKS = [ { href: "/", label: "Home" }, - { href: "/methodology", label: "Methodology" }, - { href: "/roadmap", label: "Roadmap" }, + { href: "/about", label: "About" }, { href: "/changelog", label: "Changelog" }, + { href: "/methodology", label: "Methodology" }, { href: "/package", label: "Packages" }, + { href: "/roadmap", label: "Roadmap" }, ]; export default function RootLayout({ children }: { children: React.ReactNode }) { @@ -139,15 +153,19 @@ export default function RootLayout({ children }: { children: React.ReactNode })
{children}
diff --git a/app/llms.txt/route.ts b/app/llms.txt/route.ts new file mode 100644 index 0000000..cd3ca18 --- /dev/null +++ b/app/llms.txt/route.ts @@ -0,0 +1,44 @@ +import { MODELS } from "@/lib/scoring/weights"; +import { APP_DESCRIPTION, APP_NAME, APP_URL, REPO_URL } from "@/lib/version"; + +export const dynamic = "force-static"; + +const HEADERS = { + "Content-Type": "text/markdown; charset=utf-8", + "Cache-Control": "public, max-age=3600, s-maxage=86400", +}; + +export function GET(): Response { + const body = `# ${APP_NAME} + +> ${APP_DESCRIPTION} + +The scoring engine evaluates sixteen static signals per repository โ€” twelve cross-agent (AGENTS.md / CLAUDE.md, CI configuration, test suite, README, linter / formatter, type config, license, contributing guide, reproducible dev environment, pre-commit hooks, dependency manifest, codebase size) plus four agent-specific instruction files (\`.cursor/rules/*.mdc\` for Cursor, \`GEMINI.md\` for Gemini CLI, \`.openhands/setup.sh\` for OpenHands, \`.aider.conf.yml\` for Aider). Each AI coding agent has its own weight profile across those signals, so the same repository can score differently for different agents. 
+ +## Models evaluated + +${MODELS.map((m) => `- ${m.label} โ€” ${m.rationale}`).join("\n")} + +## Key pages + +- [Leaderboard](${APP_URL}/): Per-model leaderboard across GitHub, GitLab, and Bitbucket +- [Methodology](${APP_URL}/methodology): How scores are computed; signals, weights, and limitations +- [Roadmap](${APP_URL}/roadmap): Upcoming versions +- [Changelog](${APP_URL}/changelog): What shipped per release +- [Sitemap](${APP_URL}/sitemap.xml): Every indexed URL + +## Public API + +- [\`GET /api/repos\`](${APP_URL}/api/repos): JSON dump of the leaderboard (id, owner, name, host, stars, overall_score, per-model scores) +- [\`GET /api/repo/{id}\`](${APP_URL}/api/repo/1): Per-repo detail โ€” signals, model scores, top improvements +- [\`GET /api/badge/{host}/{owner}/{name}.svg\`](${APP_URL}/api/badge/github/vercel/next.js.svg): Embeddable SVG badge (\`?model=\` for per-model) +- [\`GET /api/package/{registry}/{name}\`](${APP_URL}/api/package/npm/next): Resolve npm / PyPI / Cargo package โ†’ source-repo score + +## Source + +- [Repository](${REPO_URL}): MIT-licensed Next.js app +- [License](https://opensource.org/licenses/MIT): MIT +`; + + return new Response(body, { headers: HEADERS }); +} diff --git a/app/methodology/page.tsx b/app/methodology/page.tsx index d786112..aabed66 100644 --- a/app/methodology/page.tsx +++ b/app/methodology/page.tsx @@ -14,9 +14,61 @@ export const metadata: Metadata = { "How scores are computed today: the signals checked, the per-model weight profiles, the scoring formula, and what the static-heuristic approach deliberately doesn't measure yet.", }; +const FAQ = [ + { + q: "How is the agent-friendliness score computed?", + a: "Each repository is shallow-cloned and evaluated against sixteen static signals โ€” twelve cross-agent (AGENTS.md / CLAUDE.md, CI, tests, README, linter, type config, license, contributing guide, reproducible dev environment, pre-commit hooks, dependency manifest, codebase size) plus four agent-specific 
instruction files (`.cursor/rules/*.mdc`, `GEMINI.md`, `.openhands/setup.sh`, `.aider.conf.yml`). Per-model score = Σ(signal.pass × model.weight[signal]) / Σ(model.weight) × 100. Overall score = mean of per-model scores.", + }, + { + q: "Why score per model instead of giving one overall number?", + a: "Different agents lean on different repository properties — and we know which because each vendor documents it. Claude Code loads CLAUDE.md at the start of every conversation, so AGENTS.md and tests carry the most weight. GPT-5 Codex reads AGENTS.md before doing any work, so AGENTS.md is the strongest single signal for it. Devin runs in a sandboxed VM and needs an explicit dev-env setup (deps, secrets, lint/test commands), so dev-environment beats CI. Cursor cites `.cursor/rules/` and AGENTS.md as its canonical instruction surface. The same repository can score very differently across models, and a single overall number would hide that.", + }, + { + q: "Which AI coding agents are evaluated?", + a: "Claude Code, Cursor, Devin, GPT-5 Codex, Gemini CLI, Aider, OpenHands, and Pi. Each has its own weight profile encoded in lib/scoring/weights.ts.", + }, + { + q: "Is this a benchmark of agent performance?", + a: "No. Today every score is derived from static signals — file existence and content-length checks on the cloned tree. No agent is actually run. Per-model rationales are now derived from each agent's published documentation (see the Sources panel below for the URLs), but the weights themselves are still pre-benchmark — they're not yet calibrated against measured agent success. 
Treat the numbers as a directional signal, not a verdict.", + }, + { + q: "How can I improve my repository's score?", + a: "Add an AGENTS.md or CLAUDE.md file describing the project for agents, configure CI, ensure tests run, write a substantive README, add a linter and type config, include a license and CONTRIBUTING guide, and provide a reproducible dev environment (devcontainer or Dockerfile). The repo detail page lists the highest-impact gaps for each model.", + }, + { + q: "What is AGENTS.md or CLAUDE.md?", + a: "A markdown file at the root of a repository that gives an AI coding agent a quick orientation: what the project is, how to build and test it, key conventions, and where to look. It is the highest-weighted signal for Pi, tied with the test suite as the top weight for Claude Code, and meaningfully helps every other agent.", + }, + { + q: "How often is the data refreshed?", + a: "Manually for now — repositories are re-scored when the seed list changes or the rubric is updated. Automated periodic refresh is planned for v0.6.0.", + }, + { + q: "Which forges are supported?", + a: "GitHub, GitLab, and Bitbucket. Cross-forge support is built into the cloning and scoring pipeline so the leaderboard can compare repositories regardless of host.", + }, +]; + +const FAQ_JSON_LD = { + "@context": "https://schema.org", + "@type": "FAQPage", + mainEntity: FAQ.map((entry) => ({ + "@type": "Question", + name: entry.q, + acceptedAnswer: { "@type": "Answer", text: entry.a }, + })), +}; + export default function MethodologyPage() { return ( <> +