From 19b1b937fbcbeeac5578ead23647ef01085ffc07 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:10:51 +0000 Subject: [PATCH 01/39] fix(session-end): acquire lock before spawning wiki worker If a periodic worker is already running for the session, spawning a second worker from SessionEnd causes two concurrent UPDATEs on the same summary row. The Deeplake backend silently drops one of two rapid UPDATEs on the same row (see CLAUDE.md quirk), so the second worker's write can erase the first. SessionEnd now calls tryAcquireLock and bails out with a log line when the periodic worker holds the lock. The running worker releases it in its finally block, so there's no coordination gap. Also drops the DEEPLAKE_WIKI_WORKER / DEEPLAKE_CAPTURE fallbacks here (purely internal flags, no user reach). --- claude-code/bundle/session-end.js | 48 +++++++++++++++++++++++++++++-- src/hooks/session-end.ts | 13 +++++++-- 2 files changed, 57 insertions(+), 4 deletions(-) diff --git a/claude-code/bundle/session-end.js b/claude-code/bundle/session-end.js index f8af356..fbc3e73 100755 --- a/claude-code/bundle/session-end.js +++ b/claude-code/bundle/session-end.js @@ -178,12 +178,52 @@ function bundleDirFromImportMeta(importMetaUrl) { return dirname(fileURLToPath(importMetaUrl)); } +// dist/src/hooks/summary-state.js +import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, writeSync, mkdirSync as mkdirSync2, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; +import { homedir as homedir4 } from "node:os"; +import { join as join4 } from "node:path"; +var STATE_DIR = join4(homedir4(), ".claude", "hooks", "summary-state"); +var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); +function lockPath(sessionId) { + return join4(STATE_DIR, `${sessionId}.lock`); +} +function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { + mkdirSync2(STATE_DIR, { recursive: true }); + const p = lockPath(sessionId); + if 
(existsSync2(p)) { + try { + const ageMs = Date.now() - parseInt(readFileSync2(p, "utf-8"), 10); + if (Number.isFinite(ageMs) && ageMs < maxAgeMs) + return false; + } catch { + } + try { + unlinkSync(p); + } catch { + return false; + } + } + try { + const fd = openSync(p, "wx"); + try { + writeSync(fd, String(Date.now())); + } finally { + closeSync(fd); + } + return true; + } catch (e) { + if (e.code === "EEXIST") + return false; + throw e; + } +} + // dist/src/hooks/session-end.js var log2 = (msg) => log("session-end", msg); async function main() { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; - if ((process.env.HIVEMIND_CAPTURE ?? process.env.DEEPLAKE_CAPTURE) === "false") + if (process.env.HIVEMIND_CAPTURE === "false") return; const input = await readStdin(); const sessionId = input.session_id; @@ -195,6 +235,10 @@ async function main() { log2("no config"); return; } + if (!tryAcquireLock(sessionId)) { + wikiLog(`SessionEnd: periodic worker already running for ${sessionId}, skipping`); + return; + } wikiLog(`SessionEnd: triggering summary for ${sessionId}`); spawnWikiWorker({ config, diff --git a/src/hooks/session-end.ts b/src/hooks/session-end.ts index 4350d22..655e940 100644 --- a/src/hooks/session-end.ts +++ b/src/hooks/session-end.ts @@ -12,6 +12,7 @@ import { readStdin } from "../utils/stdin.js"; import { loadConfig } from "../config.js"; import { log as _log } from "../utils/debug.js"; import { bundleDirFromImportMeta, spawnWikiWorker, wikiLog } from "./spawn-wiki-worker.js"; +import { tryAcquireLock } from "./summary-state.js"; const log = (msg: string) => _log("session-end", msg); @@ -22,8 +23,8 @@ interface StopInput { } async function main(): Promise { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") return; - if ((process.env.HIVEMIND_CAPTURE ?? 
process.env.DEEPLAKE_CAPTURE) === "false") return; + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; + if (process.env.HIVEMIND_CAPTURE === "false") return; const input = await readStdin(); const sessionId = input.session_id; @@ -33,6 +34,14 @@ async function main(): Promise { const config = loadConfig(); if (!config) { log("no config"); return; } + // Coordinate with the periodic worker: if one is already running for this + // session, skip. Two workers writing the same summary row trip the + // Deeplake UPDATE-coalescing quirk (see CLAUDE.md) and drop one write. + if (!tryAcquireLock(sessionId)) { + wikiLog(`SessionEnd: periodic worker already running for ${sessionId}, skipping`); + return; + } + wikiLog(`SessionEnd: triggering summary for ${sessionId}`); spawnWikiWorker({ config, From 2610612a92396273d8881a88a0de75c91f3fc54a Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:11:00 +0000 Subject: [PATCH 02/39] fix(codex-stop): acquire lock before spawning wiki worker MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Same race as SessionEnd on the Claude Code side: Codex has no SessionEnd event, so Stop plays that role. If the capture hook already spawned a periodic worker for this session, letting Stop spawn a second one causes two concurrent UPDATEs on the same summary row and the Deeplake backend silently drops one of them. Stop now tryAcquireLock; if held, it logs and returns. The capture step (event insert) still runs unconditionally — only the wiki spawn is gated. Also drops the DEEPLAKE_WIKI_WORKER / DEEPLAKE_CAPTURE fallbacks. 
--- codex/bundle/stop.js | 54 +++++++++++++++++++++++++++++++++++++---- src/hooks/codex/stop.ts | 14 +++++++++-- 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/codex/bundle/stop.js b/codex/bundle/stop.js index 0657bf3..20ec054 100755 --- a/codex/bundle/stop.js +++ b/codex/bundle/stop.js @@ -1,7 +1,7 @@ #!/usr/bin/env node // dist/src/hooks/codex/stop.js -import { readFileSync as readFileSync2, existsSync as existsSync2 } from "node:fs"; +import { readFileSync as readFileSync3, existsSync as existsSync3 } from "node:fs"; // dist/src/utils/stdin.js function readStdin() { @@ -408,14 +408,54 @@ function bundleDirFromImportMeta(importMetaUrl) { return dirname(fileURLToPath(importMetaUrl)); } +// dist/src/hooks/summary-state.js +import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, writeSync, mkdirSync as mkdirSync2, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; +import { homedir as homedir4 } from "node:os"; +import { join as join4 } from "node:path"; +var STATE_DIR = join4(homedir4(), ".claude", "hooks", "summary-state"); +var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); +function lockPath(sessionId) { + return join4(STATE_DIR, `${sessionId}.lock`); +} +function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { + mkdirSync2(STATE_DIR, { recursive: true }); + const p = lockPath(sessionId); + if (existsSync2(p)) { + try { + const ageMs = Date.now() - parseInt(readFileSync2(p, "utf-8"), 10); + if (Number.isFinite(ageMs) && ageMs < maxAgeMs) + return false; + } catch { + } + try { + unlinkSync(p); + } catch { + return false; + } + } + try { + const fd = openSync(p, "wx"); + try { + writeSync(fd, String(Date.now())); + } finally { + closeSync(fd); + } + return true; + } catch (e) { + if (e.code === "EEXIST") + return false; + throw e; + } +} + // dist/src/hooks/codex/stop.js var log3 = (msg) => log("codex-stop", msg); -var CAPTURE = (process.env.HIVEMIND_CAPTURE ?? 
process.env.DEEPLAKE_CAPTURE) !== "false"; +var CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; function buildSessionPath(config, sessionId) { return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${config.workspaceId}_${sessionId}.jsonl`; } async function main() { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); const sessionId = input.session_id; @@ -435,8 +475,8 @@ async function main() { if (input.transcript_path) { try { const transcriptPath = input.transcript_path; - if (existsSync2(transcriptPath)) { - const transcript = readFileSync2(transcriptPath, "utf-8"); + if (existsSync3(transcriptPath)) { + const transcript = readFileSync3(transcriptPath, "utf-8"); const lines = transcript.trim().split("\n").reverse(); for (const line2 of lines) { try { @@ -484,6 +524,10 @@ async function main() { } if (!CAPTURE) return; + if (!tryAcquireLock(sessionId)) { + wikiLog(`Stop: periodic worker already running for ${sessionId}, skipping`); + return; + } wikiLog(`Stop: triggering summary for ${sessionId}`); spawnCodexWikiWorker({ config, diff --git a/src/hooks/codex/stop.ts b/src/hooks/codex/stop.ts index 398ab15..adb7295 100644 --- a/src/hooks/codex/stop.ts +++ b/src/hooks/codex/stop.ts @@ -18,6 +18,7 @@ import { DeeplakeApi } from "../../deeplake-api.js"; import { sqlStr } from "../../utils/sql.js"; import { log as _log } from "../../utils/debug.js"; import { bundleDirFromImportMeta, spawnCodexWikiWorker, wikiLog } from "./spawn-wiki-worker.js"; +import { tryAcquireLock } from "../summary-state.js"; const log = (msg: string) => _log("codex-stop", msg); @@ -29,14 +30,14 @@ interface CodexStopInput { model: string; } -const CAPTURE = (process.env.HIVEMIND_CAPTURE ?? 
process.env.DEEPLAKE_CAPTURE) !== "false"; +const CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; function buildSessionPath(config: { userName: string; orgName: string; workspaceId: string }, sessionId: string): string { return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${config.workspaceId}_${sessionId}.jsonl`; } async function main(): Promise { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") return; + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); const sessionId = input.session_id; @@ -119,6 +120,15 @@ async function main(): Promise { // 2. Spawn wiki worker — skip when capture disabled if (!CAPTURE) return; + + // Coordinate with the periodic worker: if one is already running for this + // session, skip. Two workers writing the same summary row trip the + // Deeplake UPDATE-coalescing quirk (see CLAUDE.md) and drop one write. + if (!tryAcquireLock(sessionId)) { + wikiLog(`Stop: periodic worker already running for ${sessionId}, skipping`); + return; + } + wikiLog(`Stop: triggering summary for ${sessionId}`); spawnCodexWikiWorker({ config, From 19b420e602832c662a249314ec0e95e4527bd8ae Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:11:18 +0000 Subject: [PATCH 03/39] test(summary-state): cover state machine and cross-process locking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit summary-state.ts carries the periodic-summary trigger logic and the file-based RMW / advisory locks, but had no direct test coverage. The locking code in particular is the kind that regresses silently when untouched. 
Pins down: - bumpTotalCount: fresh seed + increment on existing state - shouldTrigger: first-at-10 rule only active while lastSummaryCount=0, exact cadence boundary, time trigger guarded by msgsSince > 0, custom everyNMessages - tryAcquireLock: mutual exclusion, custom maxAgeMs (short TTL reclaim), timestamp exactly at Date.now() is fresh, clock-skew future timestamps treated as fresh, non-numeric contents treated as stale - finalizeSummary: sets lastSummaryCount, preserves totalCount when higher, handles missing prior state - loadTriggerConfig: defaults, valid env overrides, invalid values ignored, fractional hours accepted - Full cycle: bump x9 no-trigger, bump#10 triggers, acquire, finalize + release, bump#11 below 50-message cadence, bump#60 re-triggers - Cross-process concurrency via spawned subprocesses: N bumps yield totalCount=N (lock prevents lost updates), N racers on tryAcquireLock yield exactly one winner Redirects $HOME to a tmpdir before importing so the module's homedir()-derived STATE_DIR points at an isolated directory — no pollution of ~/.claude/hooks/summary-state during test runs. --- claude-code/tests/summary-state.test.ts | 434 ++++++++++++++++++++++++ 1 file changed, 434 insertions(+) create mode 100644 claude-code/tests/summary-state.test.ts diff --git a/claude-code/tests/summary-state.test.ts b/claude-code/tests/summary-state.test.ts new file mode 100644 index 0000000..0c32a5d --- /dev/null +++ b/claude-code/tests/summary-state.test.ts @@ -0,0 +1,434 @@ +import { describe, it, expect, beforeAll, afterAll, beforeEach } from "vitest"; +import { mkdtempSync, rmSync, writeFileSync, existsSync, mkdirSync, readFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join, dirname } from "node:path"; +import { spawn } from "node:child_process"; + +/** + * Functional tests for summary-state. The module computes STATE_DIR from + * homedir() at module-load time, so we redirect $HOME to a tmp dir BEFORE + * importing. 
Every test uses a unique session id so there is no cross-test + * contamination. + * + * What these tests pin down: + * - bumpTotalCount seeds fresh state and increments existing state + * - shouldTrigger fires the first summary at 10 events, obeys msg/time + * cadence, and guards time-cadence with msgsSince > 0 + * - tryAcquireLock is mutually exclusive, reclaims stale locks, and rejects + * held locks + * - finalizeSummary advances lastSummaryCount and preserves the highest + * observed totalCount + * - loadTriggerConfig respects env overrides and falls back to defaults + */ + +let tmpHome: string; +let mod: typeof import("../../src/hooks/summary-state.js"); + +beforeAll(async () => { + tmpHome = mkdtempSync(join(tmpdir(), "summary-state-test-")); + process.env.HOME = tmpHome; + mod = await import("../../src/hooks/summary-state.js"); +}); + +afterAll(() => { + try { rmSync(tmpHome, { recursive: true, force: true }); } catch { /* ignore */ } +}); + +const newSessionId = () => `test-${crypto.randomUUID()}`; + +describe("bumpTotalCount", () => { + it("seeds fresh state with totalCount=1 and lastSummaryCount=0", () => { + const sid = newSessionId(); + const state = mod.bumpTotalCount(sid); + expect(state.totalCount).toBe(1); + expect(state.lastSummaryCount).toBe(0); + expect(typeof state.lastSummaryAt).toBe("number"); + }); + + it("increments existing totalCount and preserves lastSummaryAt/lastSummaryCount", () => { + const sid = newSessionId(); + const first = mod.bumpTotalCount(sid); + const second = mod.bumpTotalCount(sid); + const third = mod.bumpTotalCount(sid); + expect(second.totalCount).toBe(2); + expect(third.totalCount).toBe(3); + expect(second.lastSummaryAt).toBe(first.lastSummaryAt); + expect(third.lastSummaryCount).toBe(0); + }); +}); + +describe("shouldTrigger", () => { + const cfg = { everyNMessages: 50, everyHours: 2 }; + + it("does NOT fire before 10 events on a fresh session", () => { + const now = Date.now(); + for (let n = 1; n <= 9; n++) { + 
expect(mod.shouldTrigger( + { lastSummaryAt: now, lastSummaryCount: 0, totalCount: n }, cfg, now, + )).toBe(false); + } + }); + + it("fires the first summary at exactly 10 events", () => { + const now = Date.now(); + expect(mod.shouldTrigger( + { lastSummaryAt: now, lastSummaryCount: 0, totalCount: 10 }, cfg, now, + )).toBe(true); + }); + + it("fires when msgsSince reaches everyNMessages", () => { + const now = Date.now(); + expect(mod.shouldTrigger( + { lastSummaryAt: now, lastSummaryCount: 10, totalCount: 59 }, cfg, now, + )).toBe(false); + expect(mod.shouldTrigger( + { lastSummaryAt: now, lastSummaryCount: 10, totalCount: 60 }, cfg, now, + )).toBe(true); + }); + + it("fires when enough time has elapsed and there is at least one new event", () => { + const now = Date.now(); + const twoHoursAgo = now - 2 * 3600 * 1000; + expect(mod.shouldTrigger( + { lastSummaryAt: twoHoursAgo, lastSummaryCount: 10, totalCount: 11 }, cfg, now, + )).toBe(true); + }); + + it("does NOT fire on time alone when no new events have arrived", () => { + const now = Date.now(); + const twoHoursAgo = now - 2 * 3600 * 1000; + expect(mod.shouldTrigger( + { lastSummaryAt: twoHoursAgo, lastSummaryCount: 42, totalCount: 42 }, cfg, now, + )).toBe(false); + }); + + it("does NOT fire when below both thresholds", () => { + const now = Date.now(); + expect(mod.shouldTrigger( + { lastSummaryAt: now - 30 * 60 * 1000, lastSummaryCount: 10, totalCount: 30 }, cfg, now, + )).toBe(false); + }); +}); + +describe("tryAcquireLock", () => { + it("succeeds on a fresh session and blocks a second acquire", () => { + const sid = newSessionId(); + expect(mod.tryAcquireLock(sid)).toBe(true); + expect(mod.tryAcquireLock(sid)).toBe(false); + mod.releaseLock(sid); + }); + + it("reclaims a stale lock past maxAge", () => { + const sid = newSessionId(); + // Seed a stale lock file directly: timestamp well in the past. 
+ const p = mod.lockPath(sid); + mkdirSync(dirname(p), { recursive: true }); + writeFileSync(p, String(Date.now() - 11 * 60 * 1000)); + // 10-minute default maxAge: the stale lock must be reclaimed. + expect(mod.tryAcquireLock(sid)).toBe(true); + mod.releaseLock(sid); + }); + + it("honors a fresh lock younger than maxAge", () => { + const sid = newSessionId(); + expect(mod.tryAcquireLock(sid)).toBe(true); + // Second acquire must fail — lock timestamp is ~now, well inside maxAge. + expect(mod.tryAcquireLock(sid)).toBe(false); + mod.releaseLock(sid); + }); + + it("releaseLock on a non-existent lock is a no-op", () => { + const sid = newSessionId(); + expect(() => mod.releaseLock(sid)).not.toThrow(); + }); + + it("treats an unreadable lock (non-numeric contents) as stale", () => { + const sid = newSessionId(); + const p = mod.lockPath(sid); + mkdirSync(dirname(p), { recursive: true }); + writeFileSync(p, "garbage-not-a-number"); + expect(mod.tryAcquireLock(sid)).toBe(true); + mod.releaseLock(sid); + }); +}); + +describe("finalizeSummary", () => { + it("sets lastSummaryCount to the jsonl line count and advances lastSummaryAt", () => { + const sid = newSessionId(); + mod.bumpTotalCount(sid); + mod.bumpTotalCount(sid); + const before = Date.now(); + mod.finalizeSummary(sid, 2); + // Re-read: totalCount must be preserved (max of previous and jsonlLines) + const s = JSON.parse(readFileSync(mod.statePath(sid), "utf-8")); + expect(s.lastSummaryCount).toBe(2); + expect(s.totalCount).toBe(2); + expect(s.lastSummaryAt).toBeGreaterThanOrEqual(before); + }); + + it("preserves totalCount when jsonlLines is lower than totalCount", () => { + const sid = newSessionId(); + for (let i = 0; i < 5; i++) mod.bumpTotalCount(sid); + mod.finalizeSummary(sid, 3); + const s = JSON.parse(readFileSync(mod.statePath(sid), "utf-8")); + expect(s.lastSummaryCount).toBe(3); + expect(s.totalCount).toBe(5); + }); + + it("handles missing prior state (no earlier bumpTotalCount)", () => { + const sid = 
newSessionId(); + mod.finalizeSummary(sid, 4); + const s = JSON.parse(readFileSync(mod.statePath(sid), "utf-8")); + expect(s.lastSummaryCount).toBe(4); + expect(s.totalCount).toBe(4); + }); +}); + +describe("loadTriggerConfig", () => { + const origN = process.env.HIVEMIND_SUMMARY_EVERY_N_MSGS; + const origH = process.env.HIVEMIND_SUMMARY_EVERY_HOURS; + + beforeEach(() => { + delete process.env.HIVEMIND_SUMMARY_EVERY_N_MSGS; + delete process.env.HIVEMIND_SUMMARY_EVERY_HOURS; + }); + + afterAll(() => { + if (origN !== undefined) process.env.HIVEMIND_SUMMARY_EVERY_N_MSGS = origN; + if (origH !== undefined) process.env.HIVEMIND_SUMMARY_EVERY_HOURS = origH; + }); + + it("falls back to defaults when env vars are unset", () => { + const cfg = mod.loadTriggerConfig(); + expect(cfg.everyNMessages).toBe(50); + expect(cfg.everyHours).toBe(2); + }); + + it("respects valid env overrides", () => { + process.env.HIVEMIND_SUMMARY_EVERY_N_MSGS = "30"; + process.env.HIVEMIND_SUMMARY_EVERY_HOURS = "1"; + const cfg = mod.loadTriggerConfig(); + expect(cfg.everyNMessages).toBe(30); + expect(cfg.everyHours).toBe(1); + }); + + it("ignores invalid values and uses defaults", () => { + process.env.HIVEMIND_SUMMARY_EVERY_N_MSGS = "not-a-number"; + process.env.HIVEMIND_SUMMARY_EVERY_HOURS = "-5"; + const cfg = mod.loadTriggerConfig(); + expect(cfg.everyNMessages).toBe(50); + expect(cfg.everyHours).toBe(2); + }); + + it("accepts fractional hours", () => { + process.env.HIVEMIND_SUMMARY_EVERY_HOURS = "0.5"; + const cfg = mod.loadTriggerConfig(); + expect(cfg.everyHours).toBe(0.5); + }); +}); + +describe("state files live under $HOME/.claude/hooks/summary-state/", () => { + it("writeState creates the directory and writes JSON", () => { + const sid = newSessionId(); + mod.bumpTotalCount(sid); + const expected = join(tmpHome, ".claude", "hooks", "summary-state", `${sid}.json`); + expect(existsSync(expected)).toBe(true); + }); +}); + +// 
══════════════════════════════════════════════════════════════════════════════ +// Edge-case and integration tests — these pin down the full periodic-summary +// state machine and the bounds that the capture hook relies on. +// ══════════════════════════════════════════════════════════════════════════════ + +describe("shouldTrigger — boundary conditions", () => { + const cfg = { everyNMessages: 50, everyHours: 2 }; + + it("first-summary rule only applies while lastSummaryCount is 0", () => { + const now = Date.now(); + // lastSummaryCount > 0 means the first-summary path is no longer active: + // totalCount=15 with lastSummaryCount=10 is 5 new messages, well below 50. + expect(mod.shouldTrigger( + { lastSummaryAt: now, lastSummaryCount: 10, totalCount: 15 }, cfg, now, + )).toBe(false); + }); + + it("time trigger fires exactly at the cadence boundary", () => { + const now = Date.now(); + const twoHoursExact = now - 2 * 3600 * 1000; + expect(mod.shouldTrigger( + { lastSummaryAt: twoHoursExact, lastSummaryCount: 10, totalCount: 11 }, cfg, now, + )).toBe(true); + }); + + it("time trigger does NOT fire just below the cadence boundary", () => { + const now = Date.now(); + const justUnder = now - (2 * 3600 * 1000 - 1); + expect(mod.shouldTrigger( + { lastSummaryAt: justUnder, lastSummaryCount: 10, totalCount: 11 }, cfg, now, + )).toBe(false); + }); + + it("msg trigger respects custom everyNMessages", () => { + const now = Date.now(); + const tightCfg = { everyNMessages: 3, everyHours: 999 }; + expect(mod.shouldTrigger( + { lastSummaryAt: now, lastSummaryCount: 10, totalCount: 12 }, tightCfg, now, + )).toBe(false); + expect(mod.shouldTrigger( + { lastSummaryAt: now, lastSummaryCount: 10, totalCount: 13 }, tightCfg, now, + )).toBe(true); + }); +}); + +describe("tryAcquireLock — age boundaries and custom maxAge", () => { + it("honors a custom maxAgeMs (short TTL reclaims quickly)", async () => { + const sid = newSessionId(); + expect(mod.tryAcquireLock(sid, 50)).toBe(true); 
+ // With 50ms TTL, sleep past the window and try again from a "new process" + await new Promise(r => setTimeout(r, 80)); + // The existing lock must now look stale even though the current process + // holds it — a separate caller (simulated here) would reclaim it. + expect(mod.tryAcquireLock(sid, 50)).toBe(true); + mod.releaseLock(sid); + }); + + it("a lock timestamp of exactly Date.now() is considered fresh", () => { + const sid = newSessionId(); + const p = mod.lockPath(sid); + mkdirSync(dirname(p), { recursive: true }); + writeFileSync(p, String(Date.now())); + expect(mod.tryAcquireLock(sid)).toBe(false); + try { rmSync(p); } catch { /* ignore */ } + }); + + it("a lock timestamp from the future (clock skew) is treated as fresh", () => { + const sid = newSessionId(); + const p = mod.lockPath(sid); + mkdirSync(dirname(p), { recursive: true }); + writeFileSync(p, String(Date.now() + 60_000)); + // ageMs is negative (< maxAgeMs), so the lock is held. + expect(mod.tryAcquireLock(sid)).toBe(false); + try { rmSync(p); } catch { /* ignore */ } + }); +}); + +describe("full periodic-summary cycle", () => { + it("bump → trigger → acquire → finalize → next bump no longer triggers", () => { + const sid = newSessionId(); + const cfg = { everyNMessages: 50, everyHours: 24 }; + + // Bump 9 times — first-summary threshold is 10, so nothing yet. + for (let i = 0; i < 9; i++) { + const s = mod.bumpTotalCount(sid); + expect(mod.shouldTrigger(s, cfg)).toBe(false); + } + + // 10th bump crosses the first-summary threshold. + const tenth = mod.bumpTotalCount(sid); + expect(tenth.totalCount).toBe(10); + expect(mod.shouldTrigger(tenth, cfg)).toBe(true); + + // Acquire the lock so the capture hook would spawn exactly one worker. + expect(mod.tryAcquireLock(sid)).toBe(true); + // A second capture within the same window cannot acquire — this is what + // prevents duplicate workers when events arrive in quick succession. 
+ expect(mod.tryAcquireLock(sid)).toBe(false); + + // Worker finishes: finalize + release. + mod.finalizeSummary(sid, 10); + mod.releaseLock(sid); + + // Next bump: lastSummaryCount is now 10, msgsSince=1, well below 50. + const eleventh = mod.bumpTotalCount(sid); + expect(eleventh.lastSummaryCount).toBe(10); + expect(eleventh.totalCount).toBe(11); + expect(mod.shouldTrigger(eleventh, cfg)).toBe(false); + }); + + it("second summary fires after everyNMessages messages past lastSummaryCount", () => { + const sid = newSessionId(); + const cfg = { everyNMessages: 50, everyHours: 24 }; + + // Fast-forward state as if a first summary already landed at 10. + for (let i = 0; i < 10; i++) mod.bumpTotalCount(sid); + mod.finalizeSummary(sid, 10); + + // Bump 49 more times: msgsSince=49, still below 50. + for (let i = 0; i < 49; i++) { + const s = mod.bumpTotalCount(sid); + expect(mod.shouldTrigger(s, cfg)).toBe(false); + } + + // 50th bump past lastSummaryCount triggers. + const trigger = mod.bumpTotalCount(sid); + expect(trigger.totalCount).toBe(60); + expect(mod.shouldTrigger(trigger, cfg)).toBe(true); + }); + + it("releaseLock is idempotent across calls", () => { + const sid = newSessionId(); + mod.tryAcquireLock(sid); + mod.releaseLock(sid); + expect(() => mod.releaseLock(sid)).not.toThrow(); + expect(() => mod.releaseLock(sid)).not.toThrow(); + // After release, a fresh acquire must succeed again. + expect(mod.tryAcquireLock(sid)).toBe(true); + mod.releaseLock(sid); + }); +}); + +describe("cross-process concurrency", () => { + // Each subprocess imports summary-state with the same $HOME + a sessionId + // passed via env var. The file-based RMW lock is the ONLY thing preventing + // lost updates (bumpTotalCount) and preventing multiple winners + // (tryAcquireLock) across processes, so these tests are a real stress test + // of the lock. Session id comes via env (TEST_SID) because tsx's `-e` flag + // does not forward positional args reliably across node versions. 
+ const modPath = new URL("../../src/hooks/summary-state.ts", import.meta.url).pathname; + + const runParallel = async (code: string, N: number, sid: string): Promise => { + const runs = Array.from({ length: N }, () => + new Promise((resolve, reject) => { + const child = spawn("npx", ["tsx", "-e", code], { + env: { ...process.env, HOME: tmpHome, TEST_SID: sid }, + stdio: ["ignore", "pipe", "pipe"], + }); + let out = ""; + child.stdout.on("data", (d: Buffer) => { out += d.toString(); }); + child.on("exit", (c: number | null) => c === 0 ? resolve(out) : reject(new Error(`exit ${c}`))); + child.on("error", reject); + }), + ); + return Promise.all(runs); + }; + + it("N parallel subprocesses each bump once and the total equals N", async () => { + const sid = newSessionId(); + const N = 8; + const code = + `import("${modPath}").then(m => { ` + + ` const s = m.bumpTotalCount(process.env.TEST_SID); ` + + ` process.stdout.write(String(s.totalCount)); ` + + `});`; + + await runParallel(code, N, sid); + + const finalState = JSON.parse(readFileSync(mod.statePath(sid), "utf-8")); + expect(finalState.totalCount).toBe(N); + }, 30_000); + + it("N parallel subprocesses racing on tryAcquireLock — exactly one wins", async () => { + const sid = newSessionId(); + const N = 8; + const code = + `import("${modPath}").then(m => { ` + + ` process.stdout.write(m.tryAcquireLock(process.env.TEST_SID) ? 
"1" : "0"); ` + + `});`; + + const results = await runParallel(code, N, sid); + const winners = results.filter(r => r === "1").length; + expect(winners).toBe(1); + mod.releaseLock(sid); + }, 30_000); +}); From 40f4438ac204977ef2ff602247da72c5b658d863 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:11:33 +0000 Subject: [PATCH 04/39] test(bundles): assert race fix + env cleanup survive the build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Source tests prove summary-state is correct; this suite scans the shipped claude-code/bundle/*.js and codex/bundle/*.js to confirm the build didn't drop the call sites or re-inline the old patterns. Asserts, for both the CC and Codex shipping paths: - session-end / stop bundles call tryAcquireLock and contain the "periodic worker already running" bail-out line (the race fix is wired to the actual spawn site) - capture bundles reference tryAcquireLock, shouldTrigger, bumpTotalCount and loadTriggerConfig (periodic trigger wired end-to-end) - No bundle contains DEEPLAKE_WIKI_WORKER or DEEPLAKE_CAPTURE anymore (pure back-compat aliases, removed) - tryAcquireLock is inlined into every hook that needs it; releaseLock only in capture bundles (session-end releases via the worker's finally — esbuild tree-shakes it out, which is expected) --- .../tests/periodic-summary-bundles.test.ts | 129 ++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 claude-code/tests/periodic-summary-bundles.test.ts diff --git a/claude-code/tests/periodic-summary-bundles.test.ts b/claude-code/tests/periodic-summary-bundles.test.ts new file mode 100644 index 0000000..e0ee786 --- /dev/null +++ b/claude-code/tests/periodic-summary-bundles.test.ts @@ -0,0 +1,129 @@ +import { describe, it, expect } from "vitest"; +import { readFileSync, existsSync } from "node:fs"; +import { resolve } from "node:path"; + +/** + * Bundle-level anti-regression for the periodic-summary feature. 
These + * tests scan the SHIPPED bundles (claude-code + codex) to confirm: + * + * 1. The SessionEnd race fix is present: before spawning the worker, the + * hook checks tryAcquireLock and bails when another worker is running. + * Two concurrent workers writing the same summary row trip the Deeplake + * UPDATE-coalescing quirk and drop one write. + * + * 2. The periodic trigger in the capture hook also acquires the lock + * before spawning — same reason. + * + * 3. The internal wiki-worker flag uses ONLY the new HIVEMIND_WIKI_WORKER + * name. DEEPLAKE_WIKI_WORKER was a migration-only fallback and is a + * plugin-internal signal, so there is no reason to keep it shipped. + * + * 4. HIVEMIND_CAPTURE=false is respected everywhere the guard existed — + * the rename left one path reading the old name only, which we fixed. + * + * Source tests (summary-state.test.ts) prove the lock module is correct; + * these bundle checks prove the build didn't drop the call sites. + */ + +const BUNDLE_ROOT = resolve(__dirname, "..", ".."); + +const SESSION_END_HOOKS: Array<[string, string]> = [ + ["claude-code session-end", resolve(BUNDLE_ROOT, "claude-code", "bundle", "session-end.js")], + ["codex stop", resolve(BUNDLE_ROOT, "codex", "bundle", "stop.js")], +]; + +const CAPTURE_HOOKS: Array<[string, string]> = [ + ["claude-code capture", resolve(BUNDLE_ROOT, "claude-code", "bundle", "capture.js")], + ["codex capture", resolve(BUNDLE_ROOT, "codex", "bundle", "capture.js")], +]; + +const ALL_BUNDLES: Array<[string, string]> = [ + ...SESSION_END_HOOKS, + ...CAPTURE_HOOKS, + ["claude-code session-start", resolve(BUNDLE_ROOT, "claude-code", "bundle", "session-start.js")], + ["claude-code session-start-setup", resolve(BUNDLE_ROOT, "claude-code", "bundle", "session-start-setup.js")], + ["codex session-start", resolve(BUNDLE_ROOT, "codex", "bundle", "session-start.js")], + ["codex session-start-setup", resolve(BUNDLE_ROOT, "codex", "bundle", "session-start-setup.js")], +]; + 
+describe("bundles exist", () => { + it.each(ALL_BUNDLES)("%s bundle file is present", (_label, path) => { + expect(existsSync(path)).toBe(true); + }); +}); + +// ══ SessionEnd-style hooks: must acquire the lock before spawning ══════════ +describe.each(SESSION_END_HOOKS)("%s bundle — race fix", (_label, path) => { + const src = readFileSync(path, "utf-8"); + + it("calls tryAcquireLock before spawning the worker", () => { + expect(src).toMatch(/tryAcquireLock/); + // The bail-out branch that exists only because of the race fix: when + // the lock is held, we log and return without spawning. + expect(src).toMatch(/periodic worker already running/); + }); + + it("spawns the wiki worker only on the happy path", () => { + // Must still reference the spawn helper — a full removal would also + // match "no race" but would break the feature. + expect(src).toMatch(/spawn(Codex)?WikiWorker/); + }); +}); + +// ══ Capture hooks: periodic trigger also acquires the lock ═════════════════ +describe.each(CAPTURE_HOOKS)("%s bundle — periodic trigger", (_label, path) => { + const src = readFileSync(path, "utf-8"); + + it("acquires the lock before spawning from the periodic path", () => { + expect(src).toMatch(/tryAcquireLock/); + expect(src).toMatch(/shouldTrigger/); + expect(src).toMatch(/bumpTotalCount/); + }); + + it("references the summary-state helpers (feature wired end-to-end)", () => { + expect(src).toMatch(/loadTriggerConfig/); + }); +}); + +// ══ Internal flag uses only the new name ═══════════════════════════════════ +describe.each(ALL_BUNDLES)("%s bundle — clean env flags", (_label, path) => { + const src = readFileSync(path, "utf-8"); + + it("uses HIVEMIND_WIKI_WORKER and not the legacy DEEPLAKE_WIKI_WORKER", () => { + // HIVEMIND_WIKI_WORKER is the internal signal the wiki worker sets on + // itself; every hook must gate on it. The old DEEPLAKE_* fallback was + // pure back-compat noise for an internal flag and is removed. 
+ if (!src.includes("HIVEMIND_WIKI_WORKER")) { + // Some bundles don't need the guard (e.g. pure utility bundles) — + // skip. Every bundle in this suite actually does gate, but be lenient. + return; + } + expect(src).not.toMatch(/DEEPLAKE_WIKI_WORKER/); + }); + + it("does not fall back to DEEPLAKE_CAPTURE for the capture-disabled guard", () => { + // The guard must read HIVEMIND_CAPTURE only. DEEPLAKE_CAPTURE is a + // pre-rename alias that would mask a user setting HIVEMIND_CAPTURE=false. + expect(src).not.toMatch(/DEEPLAKE_CAPTURE/); + }); +}); + +// ══ summary-state module is inlined into every bundle that needs it ════════ +describe("summary-state helpers are inlined into the hook bundles", () => { + // SessionEnd-style hooks only need tryAcquireLock (the worker itself + // releases the lock in its finally block). esbuild tree-shakes + // releaseLock out of those bundles, which is expected. + it.each(SESSION_END_HOOKS)("%s bundle inlines tryAcquireLock", (_label, path) => { + const src = readFileSync(path, "utf-8"); + expect(src).toMatch(/function tryAcquireLock/); + }); + + // Capture hooks need both: tryAcquireLock to gate the spawn, and + // releaseLock as the error-path fallback when spawn throws before the + // worker takes ownership of the lock. + it.each(CAPTURE_HOOKS)("%s bundle inlines tryAcquireLock + releaseLock", (_label, path) => { + const src = readFileSync(path, "utf-8"); + expect(src).toMatch(/function tryAcquireLock/); + expect(src).toMatch(/function releaseLock/); + }); +}); From cb0f1dcfd730b2ab34e93bde2a9384526df39f7f Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:11:43 +0000 Subject: [PATCH 05/39] refactor(cc-capture): drop DEEPLAKE_CAPTURE fallback HIVEMIND_CAPTURE is the single source of truth. 
The DEEPLAKE_CAPTURE alias was migration back-compat for an already-shipped rename; it's a plugin-internal flag (the wiki worker sets it on itself) so the chance of a stale user env var keeping the old name alive is low and the ergonomic cost of carrying two names is not worth it. Also removes a dead `homedir` import. --- claude-code/bundle/capture.js | 30 ++++++++++++++++++++++-------- src/hooks/capture.ts | 23 ++++++++++++++--------- 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/claude-code/bundle/capture.js b/claude-code/bundle/capture.js index c4e74f4..e25d08b 100755 --- a/claude-code/bundle/capture.js +++ b/claude-code/bundle/capture.js @@ -420,6 +420,12 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { throw e; } } +function releaseLock(sessionId) { + try { + unlinkSync(lockPath(sessionId)); + } catch { + } +} // dist/src/hooks/spawn-wiki-worker.js import { spawn, execSync } from "node:child_process"; @@ -532,7 +538,7 @@ function bundleDirFromImportMeta(importMetaUrl) { // dist/src/hooks/capture.js var log3 = (msg) => log("capture", msg); -var CAPTURE = (process.env.HIVEMIND_CAPTURE ?? 
process.env.DEEPLAKE_CAPTURE) !== "false"; +var CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; function buildSessionPath(config, sessionId) { const userName = config.userName; const orgName = config.orgName; @@ -628,13 +634,21 @@ function maybeTriggerPeriodicSummary(sessionId, cwd, config) { return; } wikiLog(`Periodic: threshold hit (total=${state.totalCount}, since=${state.totalCount - state.lastSummaryCount}, N=${cfg.everyNMessages}, hours=${cfg.everyHours})`); - spawnWikiWorker({ - config, - sessionId, - cwd, - bundleDir: bundleDirFromImportMeta(import.meta.url), - reason: "Periodic" - }); + try { + spawnWikiWorker({ + config, + sessionId, + cwd, + bundleDir: bundleDirFromImportMeta(import.meta.url), + reason: "Periodic" + }); + } catch (e) { + try { + releaseLock(sessionId); + } catch { + } + throw e; + } } catch (e) { log3(`periodic trigger error: ${e.message}`); } diff --git a/src/hooks/capture.ts b/src/hooks/capture.ts index 8610b72..b199d49 100644 --- a/src/hooks/capture.ts +++ b/src/hooks/capture.ts @@ -7,7 +7,6 @@ * Used by: UserPromptSubmit, PostToolUse (async), Stop, SubagentStop */ -import { homedir } from "node:os"; import { readStdin } from "../utils/stdin.js"; import { loadConfig, type Config } from "../config.js"; import { DeeplakeApi } from "../deeplake-api.js"; @@ -18,6 +17,7 @@ import { loadTriggerConfig, shouldTrigger, tryAcquireLock, + releaseLock, } from "./summary-state.js"; import { bundleDirFromImportMeta, spawnWikiWorker, wikiLog } from "./spawn-wiki-worker.js"; const log = (msg: string) => _log("capture", msg); @@ -43,7 +43,7 @@ interface HookInput { agent_transcript_path?: string; } -const CAPTURE = (process.env.HIVEMIND_CAPTURE ?? 
process.env.DEEPLAKE_CAPTURE) !== "false"; +const CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; /** Build the session path matching the CLI convention: * /sessions//___.jsonl */ @@ -163,13 +163,18 @@ function maybeTriggerPeriodicSummary(sessionId: string, cwd: string, config: Con } wikiLog(`Periodic: threshold hit (total=${state.totalCount}, since=${state.totalCount - state.lastSummaryCount}, N=${cfg.everyNMessages}, hours=${cfg.everyHours})`); - spawnWikiWorker({ - config, - sessionId, - cwd, - bundleDir: bundleDirFromImportMeta(import.meta.url), - reason: "Periodic", - }); + try { + spawnWikiWorker({ + config, + sessionId, + cwd, + bundleDir: bundleDirFromImportMeta(import.meta.url), + reason: "Periodic", + }); + } catch (e: any) { + try { releaseLock(sessionId); } catch { /* ignore */ } + throw e; + } } catch (e: any) { log(`periodic trigger error: ${e.message}`); } From f2ed8f224da3905c5ec24680b02b4f7971a6db87 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:11:49 +0000 Subject: [PATCH 06/39] refactor(codex-capture): drop DEEPLAKE_CAPTURE fallback Mirror of the Claude Code capture cleanup: converge on HIVEMIND_CAPTURE as the only env flag the hook honors. 
--- codex/bundle/capture.js | 30 ++++++++++++++++++++++-------- src/hooks/codex/capture.ts | 22 ++++++++++++++-------- 2 files changed, 36 insertions(+), 16 deletions(-) diff --git a/codex/bundle/capture.js b/codex/bundle/capture.js index 701036e..46b525f 100755 --- a/codex/bundle/capture.js +++ b/codex/bundle/capture.js @@ -417,6 +417,12 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { throw e; } } +function releaseLock(sessionId) { + try { + unlinkSync(lockPath(sessionId)); + } catch { + } +} // dist/src/hooks/codex/spawn-wiki-worker.js import { spawn, execSync } from "node:child_process"; @@ -526,7 +532,7 @@ function bundleDirFromImportMeta(importMetaUrl) { // dist/src/hooks/codex/capture.js var log3 = (msg) => log("codex-capture", msg); -var CAPTURE = (process.env.HIVEMIND_CAPTURE ?? process.env.DEEPLAKE_CAPTURE) !== "false"; +var CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; function buildSessionPath(config, sessionId) { return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${config.workspaceId}_${sessionId}.jsonl`; } @@ -609,13 +615,21 @@ function maybeTriggerPeriodicSummary(sessionId, cwd, config) { return; } wikiLog(`Periodic: threshold hit (total=${state.totalCount}, since=${state.totalCount - state.lastSummaryCount}, N=${cfg.everyNMessages}, hours=${cfg.everyHours})`); - spawnCodexWikiWorker({ - config, - sessionId, - cwd, - bundleDir: bundleDirFromImportMeta(import.meta.url), - reason: "Periodic" - }); + try { + spawnCodexWikiWorker({ + config, + sessionId, + cwd, + bundleDir: bundleDirFromImportMeta(import.meta.url), + reason: "Periodic" + }); + } catch (e) { + try { + releaseLock(sessionId); + } catch { + } + throw e; + } } catch (e) { log3(`periodic trigger error: ${e.message}`); } diff --git a/src/hooks/codex/capture.ts b/src/hooks/codex/capture.ts index 1d3f5ec..e749a6b 100644 --- a/src/hooks/codex/capture.ts +++ b/src/hooks/codex/capture.ts @@ -22,6 +22,7 @@ import { loadTriggerConfig, shouldTrigger, 
tryAcquireLock, + releaseLock, } from "../summary-state.js"; import { bundleDirFromImportMeta, spawnCodexWikiWorker, wikiLog } from "./spawn-wiki-worker.js"; const log = (msg: string) => _log("codex-capture", msg); @@ -42,7 +43,7 @@ interface CodexHookInput { tool_response?: Record; } -const CAPTURE = (process.env.HIVEMIND_CAPTURE ?? process.env.DEEPLAKE_CAPTURE) !== "false"; +const CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; function buildSessionPath(config: { userName: string; orgName: string; workspaceId: string }, sessionId: string): string { return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${config.workspaceId}_${sessionId}.jsonl`; @@ -138,13 +139,18 @@ function maybeTriggerPeriodicSummary(sessionId: string, cwd: string, config: Con } wikiLog(`Periodic: threshold hit (total=${state.totalCount}, since=${state.totalCount - state.lastSummaryCount}, N=${cfg.everyNMessages}, hours=${cfg.everyHours})`); - spawnCodexWikiWorker({ - config, - sessionId, - cwd, - bundleDir: bundleDirFromImportMeta(import.meta.url), - reason: "Periodic", - }); + try { + spawnCodexWikiWorker({ + config, + sessionId, + cwd, + bundleDir: bundleDirFromImportMeta(import.meta.url), + reason: "Periodic", + }); + } catch (e: any) { + try { releaseLock(sessionId); } catch { /* ignore */ } + throw e; + } } catch (e: any) { log(`periodic trigger error: ${e.message}`); } From 4e0475b0eb1321d6bba24dbe7424eac11776ab77 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:11:58 +0000 Subject: [PATCH 07/39] fix(session-start): honor HIVEMIND_CAPTURE, not only DEEPLAKE_CAPTURE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The rename PR left a latent bug in session-start.ts: the capture guard at line 179 read only the old name. 
A user setting HIVEMIND_CAPTURE=false had capture suppressed everywhere else (capture.ts, session-end.ts) but the placeholder summary INSERT still fired from session-start — the one path the env flag silently didn't reach. Capture intent and behavior drifted. Converge on HIVEMIND_CAPTURE in the session-start guard and drop the DEEPLAKE_WIKI_WORKER fallback at the top for symmetry with the other hooks. --- claude-code/bundle/session-start.js | 6 +++--- src/hooks/session-start.ts | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/claude-code/bundle/session-start.js b/claude-code/bundle/session-start.js index 072646b..765666d 100755 --- a/claude-code/bundle/session-start.js +++ b/claude-code/bundle/session-start.js @@ -441,7 +441,7 @@ async function createPlaceholder(api, table, sessionId, cwd, userName, orgName, wikiLog(`SessionStart: created placeholder for ${sessionId} (${cwd})`); } async function main() { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); let creds = loadCredentials(); @@ -459,7 +459,7 @@ async function main() { } } } - const captureEnabled = process.env.DEEPLAKE_CAPTURE !== "false"; + const captureEnabled = process.env.HIVEMIND_CAPTURE !== "false"; if (input.session_id && creds?.token) { try { const config = loadConfig(); @@ -473,7 +473,7 @@ async function main() { await createPlaceholder(api, table, input.session_id, input.cwd ?? 
"", config.userName, config.orgName, config.workspaceId); log3("placeholder created"); } else { - log3("placeholder skipped (DEEPLAKE_CAPTURE=false)"); + log3("placeholder skipped (HIVEMIND_CAPTURE=false)"); } } } catch (e) { diff --git a/src/hooks/session-start.ts b/src/hooks/session-start.ts index 9881bef..73c9cf1 100644 --- a/src/hooks/session-start.ts +++ b/src/hooks/session-start.ts @@ -150,7 +150,7 @@ interface SessionStartInput { async function main(): Promise { // Skip if this is a sub-session spawned by the wiki worker - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") return; + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); @@ -173,10 +173,10 @@ async function main(): Promise { // Ensure tables exist and (when capture is enabled) create the placeholder // summary via direct SQL. Tables must always be synced so queries return - // fresh data — only the placeholder INSERT is skipped when DEEPLAKE_CAPTURE=false + // fresh data — only the placeholder INSERT is skipped when HIVEMIND_CAPTURE=false // (benchmark runs, explicit opt-out). Mirrors the guard already in // session-start-setup.ts / session-end.ts / codex hooks. - const captureEnabled = process.env.DEEPLAKE_CAPTURE !== "false"; + const captureEnabled = process.env.HIVEMIND_CAPTURE !== "false"; if (input.session_id && creds?.token) { try { const config = loadConfig(); @@ -190,7 +190,7 @@ async function main(): Promise { await createPlaceholder(api, table, input.session_id, input.cwd ?? 
"", config.userName, config.orgName, config.workspaceId); log("placeholder created"); } else { - log("placeholder skipped (DEEPLAKE_CAPTURE=false)"); + log("placeholder skipped (HIVEMIND_CAPTURE=false)"); } } } catch (e: any) { From 05faeb5ce44d1995387035c36d6f830bdab243e5 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:12:05 +0000 Subject: [PATCH 08/39] refactor(session-start-setup): drop DEEPLAKE_WIKI_WORKER fallback Same rationale as the other hook files: the wiki-worker flag is internal, set by the plugin on itself, and the migration alias adds noise without buying anything. --- claude-code/bundle/session-start-setup.js | 2 +- src/hooks/session-start-setup.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/claude-code/bundle/session-start-setup.js b/claude-code/bundle/session-start-setup.js index c5adc35..09c2cff 100755 --- a/claude-code/bundle/session-start-setup.js +++ b/claude-code/bundle/session-start-setup.js @@ -388,7 +388,7 @@ function isNewer(latest, current) { return la > ca || la === ca && lb > cb || la === ca && lb === cb && lc > cc; } async function main() { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); const creds = loadCredentials(); diff --git a/src/hooks/session-start-setup.ts b/src/hooks/session-start-setup.ts index a64dfbf..7065fde 100644 --- a/src/hooks/session-start-setup.ts +++ b/src/hooks/session-start-setup.ts @@ -77,7 +77,7 @@ interface SessionStartInput { } async function main(): Promise { - if ((process.env.HIVEMIND_WIKI_WORKER ?? 
process.env.DEEPLAKE_WIKI_WORKER) === "1") return; + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); const creds = loadCredentials(); From 7360c500f4250dffa4f82aae7381d614b9f0254f Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:12:11 +0000 Subject: [PATCH 09/39] refactor(codex-session-start): drop DEEPLAKE_WIKI_WORKER fallback Mirror of the CC cleanup on the Codex side. --- codex/bundle/session-start.js | 2 +- src/hooks/codex/session-start.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/codex/bundle/session-start.js b/codex/bundle/session-start.js index c07f3e9..83d4530 100755 --- a/codex/bundle/session-start.js +++ b/codex/bundle/session-start.js @@ -88,7 +88,7 @@ function getInstalledVersion() { return null; } async function main() { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); const creds = loadCredentials(); diff --git a/src/hooks/codex/session-start.ts b/src/hooks/codex/session-start.ts index b272f8e..be72587 100644 --- a/src/hooks/codex/session-start.ts +++ b/src/hooks/codex/session-start.ts @@ -59,7 +59,7 @@ interface CodexSessionStartInput { } async function main(): Promise { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") return; + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); From 5d8a0e894b984f3e227dcc6f73542e189cb55c6a Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:12:20 +0000 Subject: [PATCH 10/39] refactor(codex-session-start-setup): drop DEEPLAKE_WIKI_WORKER / DEEPLAKE_CAPTURE fallbacks Final hook in the cleanup. After this, no DEEPLAKE_WIKI_WORKER or DEEPLAKE_CAPTURE reference survives in the shipped hook surface. 
--- codex/bundle/session-start-setup.js | 4 ++-- src/hooks/codex/session-start-setup.ts | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/codex/bundle/session-start-setup.js b/codex/bundle/session-start-setup.js index 74ebd8f..461393b 100755 --- a/codex/bundle/session-start-setup.js +++ b/codex/bundle/session-start-setup.js @@ -407,7 +407,7 @@ async function createPlaceholder(api, table, sessionId, cwd, userName, orgName, wikiLog(`SessionSetup: created placeholder for ${sessionId} (${cwd})`); } async function main() { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); const creds = loadCredentials(); @@ -424,7 +424,7 @@ async function main() { } catch { } } - const captureEnabled = (process.env.HIVEMIND_CAPTURE ?? process.env.DEEPLAKE_CAPTURE) !== "false"; + const captureEnabled = process.env.HIVEMIND_CAPTURE !== "false"; if (input.session_id) { try { const config = loadConfig(); diff --git a/src/hooks/codex/session-start-setup.ts b/src/hooks/codex/session-start-setup.ts index 2dfc668..395ee97 100644 --- a/src/hooks/codex/session-start-setup.ts +++ b/src/hooks/codex/session-start-setup.ts @@ -116,7 +116,7 @@ interface CodexSessionStartInput { } async function main(): Promise { - if ((process.env.HIVEMIND_WIKI_WORKER ?? process.env.DEEPLAKE_WIKI_WORKER) === "1") return; + if (process.env.HIVEMIND_WIKI_WORKER === "1") return; const input = await readStdin(); const creds = loadCredentials(); @@ -133,7 +133,7 @@ async function main(): Promise { } // Table setup + sync — always sync, only skip placeholder when capture disabled - const captureEnabled = (process.env.HIVEMIND_CAPTURE ?? 
process.env.DEEPLAKE_CAPTURE) !== "false"; + const captureEnabled = process.env.HIVEMIND_CAPTURE !== "false"; if (input.session_id) { try { const config = loadConfig(); From 331f15e58a9c8336b0045214276b16316eee7e05 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:30:00 +0000 Subject: [PATCH 11/39] test(session-end): source-level coverage for the hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Covers every branch of src/hooks/session-end.ts: - HIVEMIND_WIKI_WORKER=1 early return (nested worker) - HIVEMIND_CAPTURE=false opt-out - empty session_id / null loadConfig - lock held → skip with wiki log line - happy path: tryAcquireLock + spawn with correct args - cwd fallback to "" when missing - outer catch on readStdin throw → process.exit(0) Mocks at the network-boundary seams (readStdin, loadConfig, spawn helper, summary-state.tryAcquireLock, debug log). The rest of the hook body runs for real. Raises session-end.ts to 100%/100%/100%/100%. --- claude-code/tests/session-end-hook.test.ts | 139 +++++++++++++++++++++ 1 file changed, 139 insertions(+) create mode 100644 claude-code/tests/session-end-hook.test.ts diff --git a/claude-code/tests/session-end-hook.test.ts b/claude-code/tests/session-end-hook.test.ts new file mode 100644 index 0000000..fc6feff --- /dev/null +++ b/claude-code/tests/session-end-hook.test.ts @@ -0,0 +1,139 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +/** + * Direct source-level tests for src/hooks/session-end.ts. The hook's + * `main()` runs at module import time, so each test resets the module + * registry, wires mocks, then dynamically imports the module and waits + * for the main promise chain to settle. + * + * Coverage target: every branch of the hook — the WIKI_WORKER / CAPTURE + * early-exits, empty session_id, missing config, lock held, happy path, + * and the outer catch for thrown errors. 
+ * + * CLAUDE.md rule #2: mock only at the boundary. readStdin, loadConfig, + * spawnWikiWorker, wikiLog, and tryAcquireLock are the seams. The rest + * of the hook body runs for real. + */ + +const stdinMock = vi.fn(); +const loadConfigMock = vi.fn(); +const spawnMock = vi.fn(); +const wikiLogMock = vi.fn(); +const tryAcquireLockMock = vi.fn(); +const debugLogMock = vi.fn(); + +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: stdinMock })); +vi.mock("../../src/config.js", () => ({ loadConfig: loadConfigMock })); +vi.mock("../../src/hooks/spawn-wiki-worker.js", () => ({ + spawnWikiWorker: spawnMock, + wikiLog: wikiLogMock, + bundleDirFromImportMeta: () => "/fake/bundle", +})); +vi.mock("../../src/hooks/summary-state.js", () => ({ + tryAcquireLock: tryAcquireLockMock, +})); +vi.mock("../../src/utils/debug.js", () => ({ + log: (_tag: string, msg: string) => debugLogMock(msg), +})); + +async function runHook(): Promise { + vi.resetModules(); + await import("../../src/hooks/session-end.js"); + // main() is async and fires on import; give the microtask queue a + // chance to drain before we assert on the mocks. 
+ await new Promise(r => setImmediate(r)); +} + +const validConfig = { + token: "t", orgId: "o", orgName: "o", workspaceId: "default", + userName: "u", apiUrl: "http://example", tableName: "memory", + sessionsTableName: "sessions", +}; + +beforeEach(() => { + delete process.env.HIVEMIND_WIKI_WORKER; + delete process.env.HIVEMIND_CAPTURE; + stdinMock.mockReset().mockResolvedValue({ session_id: "sid-1", cwd: "/proj" }); + loadConfigMock.mockReset().mockReturnValue(validConfig); + spawnMock.mockReset(); + wikiLogMock.mockReset(); + tryAcquireLockMock.mockReset().mockReturnValue(true); + debugLogMock.mockReset(); +}); + +afterEach(() => { vi.restoreAllMocks(); }); + +describe("session-end hook", () => { + it("returns immediately when HIVEMIND_WIKI_WORKER=1 (nested worker invocation)", async () => { + process.env.HIVEMIND_WIKI_WORKER = "1"; + await runHook(); + expect(stdinMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(tryAcquireLockMock).not.toHaveBeenCalled(); + }); + + it("returns immediately when HIVEMIND_CAPTURE=false (opt-out)", async () => { + process.env.HIVEMIND_CAPTURE = "false"; + await runHook(); + expect(stdinMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it("returns without spawning when session_id is missing", async () => { + stdinMock.mockResolvedValue({ session_id: "", cwd: "/proj" }); + await runHook(); + expect(loadConfigMock).not.toHaveBeenCalled(); + expect(tryAcquireLockMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it("returns without spawning when loadConfig returns null (no credentials)", async () => { + loadConfigMock.mockReturnValue(null); + await runHook(); + expect(tryAcquireLockMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith("no config"); + }); + + it("skips spawn with a wiki log line when the periodic worker holds the lock", async () => { + 
tryAcquireLockMock.mockReturnValue(false); + await runHook(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(wikiLogMock).toHaveBeenCalledWith( + expect.stringContaining("periodic worker already running for sid-1, skipping"), + ); + }); + + it("spawns the wiki worker on the happy path and logs 'triggering summary'", async () => { + await runHook(); + expect(tryAcquireLockMock).toHaveBeenCalledWith("sid-1"); + expect(wikiLogMock).toHaveBeenCalledWith( + expect.stringContaining("triggering summary for sid-1"), + ); + expect(spawnMock).toHaveBeenCalledTimes(1); + const callArg = spawnMock.mock.calls[0][0]; + expect(callArg.sessionId).toBe("sid-1"); + expect(callArg.cwd).toBe("/proj"); + expect(callArg.reason).toBe("SessionEnd"); + expect(callArg.config).toBe(validConfig); + }); + + it("falls back to empty cwd when stdin omits the field", async () => { + stdinMock.mockResolvedValue({ session_id: "sid-2" }); + await runHook(); + expect(spawnMock).toHaveBeenCalledWith( + expect.objectContaining({ sessionId: "sid-2", cwd: "" }), + ); + }); + + it("catches and logs a fatal error from readStdin without crashing the process", async () => { + const boom = new Error("stdin boom"); + stdinMock.mockRejectedValue(boom); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + // Let the catch in `main().catch(...)` run. 
+ await new Promise(r => setImmediate(r)); + expect(debugLogMock).toHaveBeenCalledWith("fatal: stdin boom"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); +}); From a4f394d839c1bb03cb4c2ce4294f07a1c11624c4 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:30:11 +0000 Subject: [PATCH 12/39] test(codex-stop): source-level coverage for the hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Covers the full state machine of src/hooks/codex/stop.ts: - HIVEMIND_WIKI_WORKER=1 / empty session_id / null loadConfig guards - HIVEMIND_CAPTURE=false skips both capture and spawn - INSERT shape assertions (SQL count=1, Stop marker, codex agent, jsonb) - INSERT failure is swallowed and spawn still runs (the capture hook and the wiki spawn are independent code paths) - Transcript parsing branches: string content, array of output_text/text blocks, malformed JSONL skip, missing file, non-string/non-array content falls back to assistant_stop - Lock held vs free — the race fix - Fatal catch on readStdin throw Raises codex/stop.ts to 98.3%/90.5%/100%/98%. --- claude-code/tests/codex-stop-hook.test.ts | 256 ++++++++++++++++++++++ 1 file changed, 256 insertions(+) create mode 100644 claude-code/tests/codex-stop-hook.test.ts diff --git a/claude-code/tests/codex-stop-hook.test.ts b/claude-code/tests/codex-stop-hook.test.ts new file mode 100644 index 0000000..1a7aad8 --- /dev/null +++ b/claude-code/tests/codex-stop-hook.test.ts @@ -0,0 +1,256 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import { mkdtempSync, writeFileSync, rmSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; + +/** + * Direct source-level tests for src/hooks/codex/stop.ts. 
Covers the + * whole hook: WIKI_WORKER guard, CAPTURE guard (computed at module + * load — we resetModules per scenario), missing session_id, missing + * config, transcript parsing (string / array / bad / missing), INSERT + * failure path, lock held vs free, the spawn call, and the fatal catch. + */ + +const stdinMock = vi.fn(); +const loadConfigMock = vi.fn(); +const spawnMock = vi.fn(); +const wikiLogMock = vi.fn(); +const tryAcquireLockMock = vi.fn(); +const debugLogMock = vi.fn(); +const queryMock = vi.fn(); + +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: (...args: any[]) => stdinMock(...args) })); +vi.mock("../../src/config.js", () => ({ loadConfig: (...args: any[]) => loadConfigMock(...args) })); +vi.mock("../../src/hooks/codex/spawn-wiki-worker.js", () => ({ + spawnCodexWikiWorker: (...args: any[]) => spawnMock(...args), + wikiLog: (...args: any[]) => wikiLogMock(...args), + bundleDirFromImportMeta: () => "/fake/codex/bundle", +})); +vi.mock("../../src/hooks/summary-state.js", () => ({ + tryAcquireLock: (...args: any[]) => tryAcquireLockMock(...args), +})); +vi.mock("../../src/utils/debug.js", () => ({ + log: (_tag: string, msg: string) => debugLogMock(msg), +})); +vi.mock("../../src/deeplake-api.js", () => ({ + DeeplakeApi: class { query(sql: string) { return queryMock(sql); } }, +})); + +async function runHook(env: Record = {}): Promise { + delete process.env.HIVEMIND_WIKI_WORKER; + delete process.env.HIVEMIND_CAPTURE; + for (const [k, v] of Object.entries(env)) { + if (v === undefined) delete process.env[k]; + else process.env[k] = v; + } + vi.resetModules(); + await import("../../src/hooks/codex/stop.js"); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); +} + +const validConfig = { + token: "t", orgId: "o", orgName: "org", workspaceId: "default", + userName: "u", apiUrl: "http://example", tableName: "memory", + sessionsTableName: "sessions", +}; + +let tmpDir: string; + +beforeEach(() => { + tmpDir = 
mkdtempSync(join(tmpdir(), "codex-stop-test-")); + stdinMock.mockReset().mockResolvedValue({ + session_id: "sid-1", cwd: "/proj/foo", hook_event_name: "Stop", model: "gpt-5", + transcript_path: null, + }); + loadConfigMock.mockReset().mockReturnValue(validConfig); + spawnMock.mockReset(); + wikiLogMock.mockReset(); + tryAcquireLockMock.mockReset().mockReturnValue(true); + debugLogMock.mockReset(); + queryMock.mockReset().mockResolvedValue([]); +}); + +afterEach(() => { + vi.restoreAllMocks(); + try { rmSync(tmpDir, { recursive: true, force: true }); } catch { /* ignore */ } +}); + +describe("codex stop hook — guard paths", () => { + it("returns immediately when HIVEMIND_WIKI_WORKER=1", async () => { + await runHook({ HIVEMIND_WIKI_WORKER: "1" }); + expect(stdinMock).not.toHaveBeenCalled(); + expect(queryMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it("returns without spawning when session_id is empty", async () => { + stdinMock.mockResolvedValue({ session_id: "", cwd: "/x", hook_event_name: "Stop", model: "m" }); + await runHook(); + expect(loadConfigMock).not.toHaveBeenCalled(); + expect(queryMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it("returns without spawning when loadConfig returns null", async () => { + loadConfigMock.mockReturnValue(null); + await runHook(); + expect(queryMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith("no config"); + }); + + it("skips capture AND spawn when HIVEMIND_CAPTURE=false", async () => { + await runHook({ HIVEMIND_CAPTURE: "false" }); + expect(queryMock).not.toHaveBeenCalled(); + expect(tryAcquireLockMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + }); +}); + +describe("codex stop hook — capture path + INSERT shape", () => { + it("issues exactly one INSERT against the sessions table on the happy path", async () => { + await runHook(); + 
expect(queryMock).toHaveBeenCalledTimes(1); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toMatch(/^INSERT INTO "sessions"/); + expect(sql).toContain("'Stop'"); + expect(sql).toContain("'codex'"); + expect(sql).toContain("sid-1"); + expect(sql).toContain("::jsonb"); + expect(debugLogMock).toHaveBeenCalledWith("stop event captured"); + }); + + it("swallows an INSERT failure and still tries to spawn the wiki worker", async () => { + queryMock.mockRejectedValue(new Error("network down")); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith("capture failed: network down"); + expect(spawnMock).toHaveBeenCalledTimes(1); + }); + + it("derives projectName=unknown when cwd is the empty string", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-x", cwd: "", hook_event_name: "Stop", model: "m", transcript_path: null, + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain("'unknown'"); + }); +}); + +describe("codex stop hook — transcript parsing", () => { + const writeTranscript = (lines: string[]): string => { + const p = join(tmpDir, "transcript.jsonl"); + writeFileSync(p, lines.join("\n")); + return p; + }; + + it("extracts the last assistant message when content is a plain string", async () => { + const path = writeTranscript([ + JSON.stringify({ payload: { role: "user", content: "hi" } }), + JSON.stringify({ payload: { role: "assistant", content: "hello there" } }), + ]); + stdinMock.mockResolvedValue({ + session_id: "sid-1", cwd: "/x", hook_event_name: "Stop", model: "m", transcript_path: path, + }); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("extracted assistant message from transcript"), + ); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain("hello there"); + expect(sql).toContain('"type":"assistant_message"'); + }); + + it("extracts from content arrays, joining output_text / text blocks", async () => { + 
const path = writeTranscript([ + JSON.stringify({ + payload: { + role: "assistant", + content: [ + { type: "output_text", text: "part A" }, + { type: "reasoning", text: "ignored" }, + { type: "text", text: "part B" }, + ], + }, + }), + ]); + stdinMock.mockResolvedValue({ + session_id: "sid-1", cwd: "/x", hook_event_name: "Stop", model: "m", transcript_path: path, + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain("part A"); + expect(sql).toContain("part B"); + }); + + it("skips malformed JSONL lines and falls back to assistant_stop when no valid message", async () => { + const path = writeTranscript([ + "{not json", + JSON.stringify({ payload: { role: "user", content: "hey" } }), + ]); + stdinMock.mockResolvedValue({ + session_id: "sid-1", cwd: "/x", hook_event_name: "Stop", model: "m", transcript_path: path, + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain('"type":"assistant_stop"'); + }); + + it("handles a transcript_path that does not exist on disk (no log, no content)", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-1", cwd: "/x", hook_event_name: "Stop", model: "m", + transcript_path: join(tmpDir, "missing.jsonl"), + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain('"type":"assistant_stop"'); + expect(debugLogMock).not.toHaveBeenCalledWith( + expect.stringContaining("extracted assistant message"), + ); + }); + + it("treats content as empty when it is neither string nor array (defensive branch)", async () => { + const path = writeTranscript([ + JSON.stringify({ payload: { role: "assistant", content: { weird: true } } }), + ]); + stdinMock.mockResolvedValue({ + session_id: "sid-1", cwd: "/x", hook_event_name: "Stop", model: "m", transcript_path: path, + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain('"type":"assistant_stop"'); + }); +}); + 
+describe("codex stop hook — wiki spawn + lock coordination", () => { + it("skips the wiki spawn with a log line when tryAcquireLock returns false", async () => { + tryAcquireLockMock.mockReturnValue(false); + await runHook(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(wikiLogMock).toHaveBeenCalledWith( + expect.stringContaining("periodic worker already running for sid-1, skipping"), + ); + }); + + it("spawns the codex wiki worker on the happy path with the right arguments", async () => { + await runHook(); + expect(spawnMock).toHaveBeenCalledTimes(1); + const arg = spawnMock.mock.calls[0][0]; + expect(arg.sessionId).toBe("sid-1"); + expect(arg.cwd).toBe("/proj/foo"); + expect(arg.reason).toBe("Stop"); + expect(arg.config).toBe(validConfig); + }); +}); + +describe("codex stop hook — fatal catch", () => { + it("catches a thrown readStdin error and exits 0 without crashing", async () => { + stdinMock.mockRejectedValue(new Error("bad stdin")); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + await new Promise(r => setImmediate(r)); + expect(debugLogMock).toHaveBeenCalledWith("fatal: bad stdin"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); +}); From 0391e49dbad6c52aea151ec81d5dba35be28be2f Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:30:24 +0000 Subject: [PATCH 13/39] test(cc-capture): source-level coverage for the hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit src/hooks/capture.ts had no direct test — capture.test.ts duplicated buildSessionPath inline and tested the copy. 
This imports the real module and exercises every branch: - CAPTURE guard / null config - Event-type branches: user_message / tool_call / assistant_message (with and without agent_transcript_path) / unknown event skip - INSERT fallback: table-missing triggers ensureSessionsTable + retry (both 'does not exist' and 'permission denied' variants) - Unrelated SQL errors re-throw and bubble to the outer main().catch - Periodic trigger helper: bumpTotalCount + shouldTrigger branches, lock held vs free, spawn failure releases the lock, release failure is swallowed, outer catch on bumpTotalCount throw - Defensive fallbacks: undefined workspaceId → 'default', missing cwd → projectName='unknown' Raises capture.ts to 100%/97%/100%/100%. --- claude-code/tests/capture-hook.test.ts | 313 +++++++++++++++++++++++++ 1 file changed, 313 insertions(+) create mode 100644 claude-code/tests/capture-hook.test.ts diff --git a/claude-code/tests/capture-hook.test.ts b/claude-code/tests/capture-hook.test.ts new file mode 100644 index 0000000..c40e8e6 --- /dev/null +++ b/claude-code/tests/capture-hook.test.ts @@ -0,0 +1,313 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +/** + * Direct source-level tests for src/hooks/capture.ts. The module runs + * main() at import time; each scenario resets the registry and imports + * fresh. Mocks: readStdin, loadConfig, DeeplakeApi, spawn-wiki-worker, + * summary-state. Everything else (SQL assembly, entry shape, meta + * merging, JSON escaping) runs for real. + * + * Coverage target: each event-type branch (prompt / tool / assistant / + * unknown), the CAPTURE guard, the table-missing retry, the unrelated + * error re-throw, and every leg of the periodic-trigger helper + * (threshold not met / met + lock free / met + lock held / spawn + * throws / outer catch). 
+ */ + +const stdinMock = vi.fn(); +const loadConfigMock = vi.fn(); +const spawnMock = vi.fn(); +const wikiLogMock = vi.fn(); +const tryAcquireLockMock = vi.fn(); +const releaseLockMock = vi.fn(); +const bumpTotalCountMock = vi.fn(); +const loadTriggerConfigMock = vi.fn(); +const shouldTriggerMock = vi.fn(); +const debugLogMock = vi.fn(); +const queryMock = vi.fn(); +const ensureSessionsTableMock = vi.fn(); + +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: (...a: any[]) => stdinMock(...a) })); +vi.mock("../../src/config.js", () => ({ loadConfig: (...a: any[]) => loadConfigMock(...a) })); +vi.mock("../../src/hooks/spawn-wiki-worker.js", () => ({ + spawnWikiWorker: (...a: any[]) => spawnMock(...a), + wikiLog: (...a: any[]) => wikiLogMock(...a), + bundleDirFromImportMeta: () => "/fake/bundle", +})); +vi.mock("../../src/hooks/summary-state.js", () => ({ + tryAcquireLock: (...a: any[]) => tryAcquireLockMock(...a), + releaseLock: (...a: any[]) => releaseLockMock(...a), + bumpTotalCount: (...a: any[]) => bumpTotalCountMock(...a), + loadTriggerConfig: (...a: any[]) => loadTriggerConfigMock(...a), + shouldTrigger: (...a: any[]) => shouldTriggerMock(...a), +})); +vi.mock("../../src/utils/debug.js", () => ({ + log: (_tag: string, msg: string) => debugLogMock(msg), +})); +vi.mock("../../src/deeplake-api.js", () => ({ + DeeplakeApi: class { + query(sql: string) { return queryMock(sql); } + ensureSessionsTable(t: string) { return ensureSessionsTableMock(t); } + }, +})); + +async function runHook(env: Record = {}): Promise { + delete process.env.HIVEMIND_WIKI_WORKER; + delete process.env.HIVEMIND_CAPTURE; + for (const [k, v] of Object.entries(env)) { + if (v === undefined) delete process.env[k]; + else process.env[k] = v; + } + vi.resetModules(); + await import("../../src/hooks/capture.js"); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); +} + +const validConfig = { + token: "t", orgId: "o", orgName: "acme", workspaceId: "default", + 
userName: "alice", apiUrl: "http://example", tableName: "memory", + sessionsTableName: "sessions", +}; + +beforeEach(() => { + stdinMock.mockReset().mockResolvedValue({ + session_id: "sid-1", + cwd: "/workspaces/proj", + hook_event_name: "UserPromptSubmit", + prompt: "hello", + }); + loadConfigMock.mockReset().mockReturnValue(validConfig); + spawnMock.mockReset(); + wikiLogMock.mockReset(); + tryAcquireLockMock.mockReset().mockReturnValue(true); + releaseLockMock.mockReset(); + bumpTotalCountMock.mockReset().mockReturnValue({ + lastSummaryAt: Date.now(), lastSummaryCount: 0, totalCount: 1, + }); + loadTriggerConfigMock.mockReset().mockReturnValue({ everyNMessages: 50, everyHours: 2 }); + shouldTriggerMock.mockReset().mockReturnValue(false); + debugLogMock.mockReset(); + queryMock.mockReset().mockResolvedValue([]); + ensureSessionsTableMock.mockReset().mockResolvedValue(undefined); +}); + +afterEach(() => { vi.restoreAllMocks(); }); + +describe("capture hook — guard", () => { + it("returns without touching stdin when HIVEMIND_CAPTURE=false", async () => { + await runHook({ HIVEMIND_CAPTURE: "false" }); + expect(stdinMock).not.toHaveBeenCalled(); + expect(queryMock).not.toHaveBeenCalled(); + }); + + it("returns when loadConfig returns null", async () => { + loadConfigMock.mockReturnValue(null); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith("no config"); + expect(queryMock).not.toHaveBeenCalled(); + }); +}); + +describe("capture hook — event-type branches", () => { + it("user_message: INSERT contains prompt content", async () => { + await runHook(); + expect(queryMock).toHaveBeenCalledTimes(1); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toMatch(/INSERT INTO "sessions"/); + expect(sql).toContain('"type":"user_message"'); + expect(sql).toContain('"content":"hello"'); + expect(debugLogMock).toHaveBeenCalledWith(expect.stringMatching(/^user session=sid-1$/)); + }); + + it("tool_call: INSERT contains tool_name + serialized 
input/response", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-2", + cwd: "/p", + hook_event_name: "PostToolUse", + tool_name: "Bash", + tool_use_id: "tu-1", + tool_input: { command: "ls" }, + tool_response: { stdout: "file" }, + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain('"type":"tool_call"'); + expect(sql).toContain('"tool_name":"Bash"'); + expect(sql).toContain('tool_input'); + expect(sql).toContain('tool_response'); + expect(debugLogMock).toHaveBeenCalledWith(expect.stringMatching(/^tool=Bash session=sid-2$/)); + }); + + it("assistant_message without agent_transcript_path", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-3", + cwd: "/p", + hook_event_name: "Stop", + last_assistant_message: "reply text", + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain('"type":"assistant_message"'); + expect(sql).toContain('"content":"reply text"'); + expect(sql).not.toContain("agent_transcript_path"); + }); + + it("assistant_message WITH agent_transcript_path", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-4", + cwd: "/p", + hook_event_name: "SubagentStop", + last_assistant_message: "sub reply", + agent_transcript_path: "/tmp/agent.jsonl", + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain('"agent_transcript_path":"/tmp/agent.jsonl"'); + }); + + it("unknown event: skipped, no INSERT", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-x", cwd: "/p", hook_event_name: "WeirdHook", + // no prompt, no tool_name, no last_assistant_message + }); + await runHook(); + expect(queryMock).not.toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith("unknown event, skipping"); + }); +}); + +describe("capture hook — INSERT fallback + error paths", () => { + it("creates the sessions table and retries when table is missing", async () => { + queryMock + 
.mockRejectedValueOnce(new Error('relation "sessions" does not exist')) + .mockResolvedValueOnce([]); + await runHook(); + expect(ensureSessionsTableMock).toHaveBeenCalledWith("sessions"); + expect(queryMock).toHaveBeenCalledTimes(2); + expect(debugLogMock).toHaveBeenCalledWith("table missing, creating and retrying"); + }); + + it("creates the sessions table when the API returns 'permission denied'", async () => { + queryMock + .mockRejectedValueOnce(new Error("permission denied for relation sessions")) + .mockResolvedValueOnce([]); + await runHook(); + expect(ensureSessionsTableMock).toHaveBeenCalled(); + expect(queryMock).toHaveBeenCalledTimes(2); + }); + + it("re-throws unrelated errors (caught by main().catch)", async () => { + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + queryMock.mockRejectedValue(new Error("random SQL boom")); + await runHook(); + // The outer catch wraps the throw into the fatal log and exits. + expect(debugLogMock).toHaveBeenCalledWith("fatal: random SQL boom"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); +}); + +describe("capture hook — periodic trigger helper", () => { + it("does nothing when HIVEMIND_WIKI_WORKER=1 (nested worker)", async () => { + await runHook({ HIVEMIND_WIKI_WORKER: "1" }); + // The inner call is bypassed — but CAPTURE is also computed at load, + // so with WIKI_WORKER=1 the capture itself still runs (CAPTURE default + // is true). We just assert bumpTotalCount was NOT called. 
+ expect(bumpTotalCountMock).not.toHaveBeenCalled(); + }); + + it("does not spawn when shouldTrigger returns false", async () => { + shouldTriggerMock.mockReturnValue(false); + await runHook(); + expect(bumpTotalCountMock).toHaveBeenCalledTimes(1); + expect(tryAcquireLockMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it("spawns the wiki worker when shouldTrigger=true and lock acquired", async () => { + shouldTriggerMock.mockReturnValue(true); + bumpTotalCountMock.mockReturnValue({ + lastSummaryAt: 0, lastSummaryCount: 0, totalCount: 10, + }); + await runHook(); + expect(tryAcquireLockMock).toHaveBeenCalledWith("sid-1"); + expect(wikiLogMock).toHaveBeenCalledWith( + expect.stringMatching(/^Periodic: threshold hit \(total=10,/), + ); + expect(spawnMock).toHaveBeenCalledTimes(1); + expect(spawnMock.mock.calls[0][0]).toMatchObject({ sessionId: "sid-1", reason: "Periodic" }); + }); + + it("logs 'periodic trigger suppressed' when the lock is already held", async () => { + shouldTriggerMock.mockReturnValue(true); + tryAcquireLockMock.mockReturnValue(false); + await runHook(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("periodic trigger suppressed (lock held)"), + ); + }); + + it("releases the lock if spawnWikiWorker throws", async () => { + shouldTriggerMock.mockReturnValue(true); + spawnMock.mockImplementation(() => { throw new Error("spawn failed"); }); + await runHook(); + expect(releaseLockMock).toHaveBeenCalledWith("sid-1"); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("periodic trigger error: spawn failed"), + ); + }); + + it("still swallows the error when releaseLock ALSO throws", async () => { + shouldTriggerMock.mockReturnValue(true); + spawnMock.mockImplementation(() => { throw new Error("spawn failed"); }); + releaseLockMock.mockImplementation(() => { throw new Error("release failed"); }); + await runHook(); + // We should still 
see the outer periodic-trigger error log — the + // release throw is deliberately swallowed. + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("periodic trigger error: spawn failed"), + ); + }); + + it("catches errors thrown by bumpTotalCount itself (outer try)", async () => { + bumpTotalCountMock.mockImplementation(() => { throw new Error("bump boom"); }); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("periodic trigger error: bump boom"), + ); + }); +}); + +describe("capture hook — defensive fallback branches", () => { + it("falls back to 'default' workspace when config.workspaceId is undefined", async () => { + loadConfigMock.mockReturnValue({ ...validConfig, workspaceId: undefined }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + // sessionPath uses workspace; with undefined it should land on 'default' + expect(sql).toContain("alice_acme_default_sid-1.jsonl"); + }); + + it("projectName falls back to 'unknown' when cwd is undefined", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-cwd", hook_event_name: "UserPromptSubmit", prompt: "x", + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain("'unknown'"); + }); + + it("hook_event_name defaults to empty string when missing", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-no-evt", cwd: "/p", prompt: "hi", + // no hook_event_name + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + // description column (hook_event_name ?? '') should land as '' + // It appears between the projectName and the author — we just + // assert the INSERT still went through. 
+ expect(queryMock).toHaveBeenCalledTimes(1); + expect(sql).toMatch(/'[^']*', 'claude_code'/); + }); +}); From b7f04a3acc2c0dac789aaf6566b4d27b6f429bdd Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:30:33 +0000 Subject: [PATCH 14/39] test(codex-capture): source-level coverage for the hook Same structure as the claude-code capture test, mirrored for src/hooks/codex/capture.ts. Codex capture gates on hook_event_name matching UserPromptSubmit / PostToolUse exactly, so the branch coverage includes the 'UserPromptSubmit without prompt' and 'PostToolUse without tool_name' defensive skips alongside the happy paths. Raises codex/capture.ts to 100%/93.75%/100%/100%. --- claude-code/tests/codex-capture-hook.test.ts | 284 +++++++++++++++++++ 1 file changed, 284 insertions(+) create mode 100644 claude-code/tests/codex-capture-hook.test.ts diff --git a/claude-code/tests/codex-capture-hook.test.ts b/claude-code/tests/codex-capture-hook.test.ts new file mode 100644 index 0000000..9992182 --- /dev/null +++ b/claude-code/tests/codex-capture-hook.test.ts @@ -0,0 +1,284 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +/** + * Direct source-level tests for src/hooks/codex/capture.ts. Mirrors the + * claude-code capture-hook test: mocks the stdin / config / API / + * summary-state seams and asserts SQL shape, branch coverage for + * UserPromptSubmit / PostToolUse / unknown, and the periodic trigger + * helper. 
+ */ + +const stdinMock = vi.fn(); +const loadConfigMock = vi.fn(); +const spawnMock = vi.fn(); +const wikiLogMock = vi.fn(); +const tryAcquireLockMock = vi.fn(); +const releaseLockMock = vi.fn(); +const bumpTotalCountMock = vi.fn(); +const loadTriggerConfigMock = vi.fn(); +const shouldTriggerMock = vi.fn(); +const debugLogMock = vi.fn(); +const queryMock = vi.fn(); +const ensureSessionsTableMock = vi.fn(); + +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: (...a: any[]) => stdinMock(...a) })); +vi.mock("../../src/config.js", () => ({ loadConfig: (...a: any[]) => loadConfigMock(...a) })); +vi.mock("../../src/hooks/codex/spawn-wiki-worker.js", () => ({ + spawnCodexWikiWorker: (...a: any[]) => spawnMock(...a), + wikiLog: (...a: any[]) => wikiLogMock(...a), + bundleDirFromImportMeta: () => "/fake/codex/bundle", +})); +vi.mock("../../src/hooks/summary-state.js", () => ({ + tryAcquireLock: (...a: any[]) => tryAcquireLockMock(...a), + releaseLock: (...a: any[]) => releaseLockMock(...a), + bumpTotalCount: (...a: any[]) => bumpTotalCountMock(...a), + loadTriggerConfig: (...a: any[]) => loadTriggerConfigMock(...a), + shouldTrigger: (...a: any[]) => shouldTriggerMock(...a), +})); +vi.mock("../../src/utils/debug.js", () => ({ + log: (_tag: string, msg: string) => debugLogMock(msg), +})); +vi.mock("../../src/deeplake-api.js", () => ({ + DeeplakeApi: class { + query(sql: string) { return queryMock(sql); } + ensureSessionsTable(t: string) { return ensureSessionsTableMock(t); } + }, +})); + +async function runHook(env: Record = {}): Promise { + delete process.env.HIVEMIND_WIKI_WORKER; + delete process.env.HIVEMIND_CAPTURE; + for (const [k, v] of Object.entries(env)) { + if (v === undefined) delete process.env[k]; + else process.env[k] = v; + } + vi.resetModules(); + await import("../../src/hooks/codex/capture.js"); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); +} + +const validConfig = { + token: "t", orgId: "o", orgName: "acme", 
workspaceId: "default", + userName: "alice", apiUrl: "http://example", tableName: "memory", + sessionsTableName: "sessions", +}; + +beforeEach(() => { + stdinMock.mockReset().mockResolvedValue({ + session_id: "sid-1", + cwd: "/workspaces/proj", + hook_event_name: "UserPromptSubmit", + model: "gpt-5", + prompt: "hello", + }); + loadConfigMock.mockReset().mockReturnValue(validConfig); + spawnMock.mockReset(); + wikiLogMock.mockReset(); + tryAcquireLockMock.mockReset().mockReturnValue(true); + releaseLockMock.mockReset(); + bumpTotalCountMock.mockReset().mockReturnValue({ + lastSummaryAt: 0, lastSummaryCount: 0, totalCount: 1, + }); + loadTriggerConfigMock.mockReset().mockReturnValue({ everyNMessages: 50, everyHours: 2 }); + shouldTriggerMock.mockReset().mockReturnValue(false); + debugLogMock.mockReset(); + queryMock.mockReset().mockResolvedValue([]); + ensureSessionsTableMock.mockReset().mockResolvedValue(undefined); +}); + +afterEach(() => { vi.restoreAllMocks(); }); + +describe("codex capture hook — guards", () => { + it("returns when HIVEMIND_CAPTURE=false", async () => { + await runHook({ HIVEMIND_CAPTURE: "false" }); + expect(stdinMock).not.toHaveBeenCalled(); + }); + + it("returns when loadConfig is null", async () => { + loadConfigMock.mockReturnValue(null); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith("no config"); + expect(queryMock).not.toHaveBeenCalled(); + }); +}); + +describe("codex capture hook — event-type branches", () => { + it("user_message: INSERT contains prompt", async () => { + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toMatch(/INSERT INTO "sessions"/); + expect(sql).toContain('"type":"user_message"'); + expect(sql).toContain('"content":"hello"'); + expect(sql).toContain("'codex'"); + }); + + it("tool_call: INSERT contains tool_name and model metadata", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-2", cwd: "/p", + hook_event_name: "PostToolUse", + model: "gpt-5", + 
tool_name: "Bash", + tool_use_id: "tu-1", + tool_input: { command: "ls" }, + tool_response: { stdout: "x" }, + }); + await runHook(); + const sql = queryMock.mock.calls[0][0] as string; + expect(sql).toContain('"type":"tool_call"'); + expect(sql).toContain('"tool_name":"Bash"'); + expect(sql).toContain('"model":"gpt-5"'); + }); + + it("unknown hook_event_name → log and skip", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-x", cwd: "/p", hook_event_name: "SomethingElse", model: "m", + }); + await runHook(); + expect(queryMock).not.toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith("unknown event: SomethingElse, skipping"); + }); + + it("UserPromptSubmit without prompt → skipped (defensive)", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-y", cwd: "/p", hook_event_name: "UserPromptSubmit", model: "m", + }); + await runHook(); + expect(queryMock).not.toHaveBeenCalled(); + }); + + it("PostToolUse without tool_name → skipped (defensive)", async () => { + stdinMock.mockResolvedValue({ + session_id: "sid-z", cwd: "/p", hook_event_name: "PostToolUse", model: "m", + }); + await runHook(); + expect(queryMock).not.toHaveBeenCalled(); + }); +}); + +describe("codex capture hook — INSERT fallbacks", () => { + it("retries after creating the sessions table on 'does not exist'", async () => { + queryMock + .mockRejectedValueOnce(new Error('relation "sessions" does not exist')) + .mockResolvedValueOnce([]); + await runHook(); + expect(ensureSessionsTableMock).toHaveBeenCalledWith("sessions"); + expect(queryMock).toHaveBeenCalledTimes(2); + }); + + it("retries on 'permission denied' too", async () => { + queryMock + .mockRejectedValueOnce(new Error("permission denied")) + .mockResolvedValueOnce([]); + await runHook(); + expect(ensureSessionsTableMock).toHaveBeenCalled(); + }); + + it("re-throws an unrelated SQL error", async () => { + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + 
queryMock.mockRejectedValue(new Error("syntax error")); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith("fatal: syntax error"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); +}); + +describe("codex capture hook — periodic trigger", () => { + it("bypasses the trigger when HIVEMIND_WIKI_WORKER=1", async () => { + await runHook({ HIVEMIND_WIKI_WORKER: "1" }); + expect(bumpTotalCountMock).not.toHaveBeenCalled(); + }); + + it("no spawn when shouldTrigger=false", async () => { + shouldTriggerMock.mockReturnValue(false); + await runHook(); + expect(spawnMock).not.toHaveBeenCalled(); + }); + + it("spawns when shouldTrigger=true + lock free", async () => { + shouldTriggerMock.mockReturnValue(true); + bumpTotalCountMock.mockReturnValue({ + lastSummaryAt: 0, lastSummaryCount: 0, totalCount: 10, + }); + await runHook(); + expect(spawnMock).toHaveBeenCalledTimes(1); + expect(spawnMock.mock.calls[0][0]).toMatchObject({ sessionId: "sid-1", reason: "Periodic" }); + }); + + it("suppresses when lock held", async () => { + shouldTriggerMock.mockReturnValue(true); + tryAcquireLockMock.mockReturnValue(false); + await runHook(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("periodic trigger suppressed (lock held)"), + ); + }); + + it("releases the lock when spawn throws", async () => { + shouldTriggerMock.mockReturnValue(true); + spawnMock.mockImplementation(() => { throw new Error("spawn boom"); }); + await runHook(); + expect(releaseLockMock).toHaveBeenCalledWith("sid-1"); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("periodic trigger error: spawn boom"), + ); + }); + + it("swallows release failure on top of spawn failure", async () => { + shouldTriggerMock.mockReturnValue(true); + spawnMock.mockImplementation(() => { throw new Error("spawn boom"); }); + releaseLockMock.mockImplementation(() => { throw new Error("release boom"); }); + await runHook(); + 
expect(debugLogMock).toHaveBeenCalledWith(
+      expect.stringContaining("periodic trigger error: spawn boom"),
+    );
+  });
+
+  it("outer try catches bumpTotalCount throw", async () => {
+    bumpTotalCountMock.mockImplementation(() => { throw new Error("bump boom"); });
+    await runHook();
+    expect(debugLogMock).toHaveBeenCalledWith(
+      expect.stringContaining("periodic trigger error: bump boom"),
+    );
+  });
+});
+
+describe("codex capture hook — defensive fallbacks", () => {
+  it("falls back projectName='unknown' when cwd is '' ", async () => {
+    stdinMock.mockResolvedValue({
+      session_id: "sid-c", cwd: "", hook_event_name: "UserPromptSubmit", model: "m", prompt: "x",
+    });
+    await runHook();
+    const sql = queryMock.mock.calls[0][0] as string;
+    expect(sql).toContain("'unknown'");
+  });
+
+  it("falls back projectName='unknown' when cwd is undefined at runtime", async () => {
+    // The interface types cwd as string, but runtime values can arrive
+    // undefined from untyped hook inputs. The ?? fallbacks exist for this.
+    stdinMock.mockResolvedValue({
+      session_id: "sid-d", hook_event_name: "UserPromptSubmit", model: "m", prompt: "x",
+    });
+    await runHook();
+    const sql = queryMock.mock.calls[0][0] as string;
+    expect(sql).toContain("'unknown'");
+  });
+
+  it("passes empty hook_event_name through the description column fallback", async () => {
+    // `input.hook_event_name ?? ''` — construct an input where the field
+    // is legitimately missing to exercise the nullish coalesce.
+    stdinMock.mockResolvedValue({
+      session_id: "sid-e", cwd: "/p", model: "m",
+    });
+    await runHook();
+    // UserPromptSubmit / PostToolUse are the only types the codex
+    // capture handles, so this falls into "unknown event, skipping".
+    // That's fine — the `?? ''` INSERT fallback cannot be reached this
+    // way (the prompt is deliberately left out too); the gate itself
+    // is the branch under test. 
Codex capture gates on + // hook_event_name === 'UserPromptSubmit', so undefined won't match + // and the INSERT is skipped. That is itself a useful branch. + expect(queryMock).not.toHaveBeenCalled(); + }); +}); From 2853115a21df7f777eed7eaa1a1689a1672e62a1 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:30:44 +0000 Subject: [PATCH 15/39] test(session-start*): source-level coverage for both hooks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Covers src/hooks/session-start.ts and session-start-setup.ts. Both hooks share the version-check + autoupdate flow (fetch GitHub package.json, compare, execSync plugin update, clean old cache entries). Tests mock global.fetch + child_process.execSync + node:fs.readdirSync/rmSync so the whole path runs without touching the network or the real plugin cache. Also exercises the placeholder branch in session-start.ts: the hook does a direct SQL SELECT for the summary row, then either skips (row exists → resumed session) or INSERTs a new placeholder. Both branches are asserted on SQL count and shape (CLAUDE.md rule #3). 
session-start.ts → 95.9% / 84.1% / 100% / 98.2% session-start-setup.ts → 95.4% / 82.0% / 100% / 97.3% --- claude-code/tests/session-start-hook.test.ts | 313 ++++++++++++++++++ .../tests/session-start-setup-hook.test.ts | 229 +++++++++++++ 2 files changed, 542 insertions(+) create mode 100644 claude-code/tests/session-start-hook.test.ts create mode 100644 claude-code/tests/session-start-setup-hook.test.ts diff --git a/claude-code/tests/session-start-hook.test.ts b/claude-code/tests/session-start-hook.test.ts new file mode 100644 index 0000000..39f6552 --- /dev/null +++ b/claude-code/tests/session-start-hook.test.ts @@ -0,0 +1,313 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import { mkdtempSync, rmSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; + +/** + * Direct source-level tests for src/hooks/session-start.ts. The hook + * orchestrates: credential load, userName backfill, table+placeholder + * setup, version check + auto-update, and the additionalContext output. + * + * Mocks: readStdin, loadCredentials/saveCredentials, loadConfig, + * DeeplakeApi, global.fetch, child_process.execSync, and the two + * node:fs helpers used by the cache-cleanup path (readdirSync, rmSync). 
+ */ + +const stdinMock = vi.fn(); +const loadCredsMock = vi.fn(); +const saveCredsMock = vi.fn(); +const loginMock = vi.fn(); +const loadConfigMock = vi.fn(); +const debugLogMock = vi.fn(); +const ensureTableMock = vi.fn(); +const ensureSessionsTableMock = vi.fn(); +const queryMock = vi.fn(); +const execSyncMock = vi.fn(); +const readdirSyncMock = vi.fn(); +const rmSyncMock = vi.fn(); + +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: (...a: any[]) => stdinMock(...a) })); +vi.mock("../../src/commands/auth.js", () => ({ + loadCredentials: (...a: any[]) => loadCredsMock(...a), + saveCredentials: (...a: any[]) => saveCredsMock(...a), + login: (...a: any[]) => loginMock(...a), +})); +vi.mock("../../src/config.js", () => ({ loadConfig: (...a: any[]) => loadConfigMock(...a) })); +vi.mock("../../src/utils/debug.js", () => ({ + log: (_t: string, msg: string) => debugLogMock(msg), + utcTimestamp: () => "2026-04-17 00:00:00 UTC", +})); +vi.mock("../../src/deeplake-api.js", () => ({ + DeeplakeApi: class { + ensureTable() { return ensureTableMock(); } + ensureSessionsTable(t: string) { return ensureSessionsTableMock(t); } + query(sql: string) { return queryMock(sql); } + }, +})); +vi.mock("node:child_process", async () => { + const actual = await vi.importActual("node:child_process"); + return { ...actual, execSync: (...a: any[]) => execSyncMock(...a) }; +}); +vi.mock("node:fs", async () => { + const actual = await vi.importActual("node:fs"); + return { + ...actual, + readdirSync: (...a: any[]) => readdirSyncMock(...a), + rmSync: (...a: any[]) => rmSyncMock(...a), + }; +}); + +const originalFetch = global.fetch; +const fetchMock = vi.fn(); + +let stdoutLines: string[] = []; +const stdoutSpy = vi.spyOn(process.stdout, "write"); + +async function runHook(env: Record = {}): Promise { + delete process.env.HIVEMIND_WIKI_WORKER; + delete process.env.HIVEMIND_CAPTURE; + for (const [k, v] of Object.entries(env)) { + if (v === undefined) delete process.env[k]; + else 
process.env[k] = v; + } + stdoutLines = []; + stdoutSpy.mockImplementation((chunk: any) => { stdoutLines.push(String(chunk)); return true; }); + vi.resetModules(); + // @ts-expect-error + global.fetch = fetchMock; + // Intercept console.log which session-start.ts uses for the JSON emit + const originalLog = console.log; + const collected: string[] = []; + console.log = (...args: any[]) => { collected.push(args.join(" ")); }; + try { + await import("../../src/hooks/session-start.js"); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); + return collected.join("\n") || null; + } finally { + console.log = originalLog; + } +} + +const validConfig = { + token: "t", orgId: "o", orgName: "acme", workspaceId: "default", + userName: "alice", apiUrl: "http://example", tableName: "memory", + sessionsTableName: "sessions", +}; + +let cacheTmp: string; + +beforeEach(() => { + cacheTmp = mkdtempSync(join(tmpdir(), "session-start-test-")); + stdinMock.mockReset().mockResolvedValue({ session_id: "sid-1", cwd: "/workspaces/proj" }); + loadCredsMock.mockReset().mockReturnValue({ + token: "tok", orgId: "o", orgName: "acme", userName: "alice", workspaceId: "default", + }); + saveCredsMock.mockReset(); + loadConfigMock.mockReset().mockReturnValue(validConfig); + debugLogMock.mockReset(); + ensureTableMock.mockReset().mockResolvedValue(undefined); + ensureSessionsTableMock.mockReset().mockResolvedValue(undefined); + queryMock.mockReset().mockResolvedValue([]); // "no existing summary" + execSyncMock.mockReset(); + readdirSyncMock.mockReset().mockReturnValue([]); + rmSyncMock.mockReset(); + fetchMock.mockReset().mockResolvedValue({ + ok: true, + json: async () => ({ version: "0.0.1" }), // older-or-equal → no update + }); +}); + +afterEach(() => { + vi.restoreAllMocks(); + // @ts-expect-error + global.fetch = originalFetch; + try { rmSync(cacheTmp, { recursive: true, force: true }); } catch { /* ignore */ } +}); + +// ═══ Guard + credential branches 
═══════════════════════════════════════════ + +describe("session-start hook — guards", () => { + it("returns immediately when HIVEMIND_WIKI_WORKER=1", async () => { + const out = await runHook({ HIVEMIND_WIKI_WORKER: "1" }); + expect(stdinMock).not.toHaveBeenCalled(); + expect(out).toBeNull(); + }); + + it("emits additionalContext with the not-logged-in warning when no creds", async () => { + loadCredsMock.mockReturnValue(null); + const out = await runHook(); + expect(out).not.toBeNull(); + const parsed = JSON.parse(out!); + expect(parsed.hookSpecificOutput.additionalContext).toContain("Not logged in to Deeplake"); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("no credentials found"), + ); + }); + + it("emits the logged-in context when creds are present", async () => { + const out = await runHook(); + const parsed = JSON.parse(out!); + expect(parsed.hookSpecificOutput.additionalContext).toContain("Logged in to Deeplake as org: acme"); + expect(parsed.hookSpecificOutput.additionalContext).toContain("workspace: default"); + }); + + it("falls back to orgId when orgName is missing", async () => { + loadCredsMock.mockReturnValue({ + token: "t", orgId: "org-uuid", userName: "u", workspaceId: "default", + }); + const out = await runHook(); + const parsed = JSON.parse(out!); + expect(parsed.hookSpecificOutput.additionalContext).toContain("Logged in to Deeplake as org: org-uuid"); + }); + + it("backfills userName via node:os when credentials lack one", async () => { + loadCredsMock.mockReturnValue({ + token: "t", orgId: "o", orgName: "acme", workspaceId: "default", + }); + await runHook(); + expect(saveCredsMock).toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringMatching(/^backfilled and persisted userName: /), + ); + }); +}); + +// ═══ Table setup + placeholder ═════════════════════════════════════════════ + +describe("session-start hook — placeholder branching", () => { + it("creates placeholder when summary does not 
exist (query returns [])", async () => { + await runHook(); + expect(ensureTableMock).toHaveBeenCalled(); + expect(ensureSessionsTableMock).toHaveBeenCalledWith("sessions"); + // 1 SELECT (existing check) + 1 INSERT = 2 queries. + expect(queryMock).toHaveBeenCalledTimes(2); + expect(queryMock.mock.calls[0][0]).toMatch(/^SELECT path FROM/); + expect(queryMock.mock.calls[1][0]).toMatch(/^INSERT INTO/); + expect(debugLogMock).toHaveBeenCalledWith("placeholder created"); + }); + + it("skips placeholder INSERT when summary already exists (resumed session)", async () => { + queryMock.mockResolvedValueOnce([{ path: "/summaries/alice/sid-1.md" }]); + await runHook(); + expect(queryMock).toHaveBeenCalledTimes(1); // only the SELECT + }); + + it("skips placeholder INSERT when HIVEMIND_CAPTURE=false but still ensures tables", async () => { + await runHook({ HIVEMIND_CAPTURE: "false" }); + expect(ensureTableMock).toHaveBeenCalled(); + expect(ensureSessionsTableMock).toHaveBeenCalled(); + expect(queryMock).not.toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith( + "placeholder skipped (HIVEMIND_CAPTURE=false)", + ); + }); + + it("swallows placeholder errors and logs via both loggers", async () => { + ensureTableMock.mockRejectedValue(new Error("table boom")); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("placeholder failed: table boom"), + ); + }); + + it("skips setup when loadConfig returns null", async () => { + loadConfigMock.mockReturnValue(null); + await runHook(); + expect(ensureTableMock).not.toHaveBeenCalled(); + }); + + it("skips setup when session_id is empty", async () => { + stdinMock.mockResolvedValue({ session_id: "", cwd: "/x" }); + await runHook(); + expect(ensureTableMock).not.toHaveBeenCalled(); + }); +}); + +// ═══ Version check + autoupdate ═════════════════════════════════════════════ + +describe("session-start hook — version check", () => { + it("runs execSync and cleans old cache entries when a 
newer version is available", async () => { + fetchMock.mockResolvedValue({ + ok: true, + json: async () => ({ version: "999.0.0" }), + }); + readdirSyncMock.mockReturnValue([ + { name: "0.0.1", isDirectory: () => true }, + { name: "999.0.0", isDirectory: () => true }, // latest, must NOT be removed + ]); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + const out = await runHook(); + expect(execSyncMock).toHaveBeenCalled(); + expect(rmSyncMock).toHaveBeenCalledTimes(1); + expect(rmSyncMock.mock.calls[0][0]).toContain("0.0.1"); + expect(stderrSpy).toHaveBeenCalledWith(expect.stringContaining("auto-updated")); + const parsed = JSON.parse(out!); + expect(parsed.hookSpecificOutput.additionalContext).toContain("auto-updated"); + }); + + it("falls back to manual-upgrade message when autoupdate is disabled", async () => { + loadCredsMock.mockReturnValue({ + token: "t", orgId: "o", orgName: "acme", userName: "u", workspaceId: "default", + autoupdate: false, + }); + fetchMock.mockResolvedValue({ ok: true, json: async () => ({ version: "999.0.0" }) }); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("update available"), + ); + }); + + it("emits the 'auto-update failed' message when execSync throws", async () => { + fetchMock.mockResolvedValue({ ok: true, json: async () => ({ version: "999.0.0" }) }); + execSyncMock.mockImplementation(() => { throw new Error("npm unreachable"); }); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + await runHook(); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("Auto-update failed"), + ); + }); + + it("tolerates fetch failure (GitHub unreachable)", async () => { + fetchMock.mockRejectedValue(new Error("offline")); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); + + it("tolerates 
readdirSync throw during cache cleanup", async () => { + fetchMock.mockResolvedValue({ ok: true, json: async () => ({ version: "999.0.0" }) }); + readdirSyncMock.mockImplementation(() => { throw new Error("readdir boom"); }); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("cache cleanup failed: readdir boom"), + ); + }); + + it("emits 'up to date' context when latest == current", async () => { + // Real getInstalledVersion reads plugin.json from the real repo; we + // simulate "latest equals current" by returning the same version. + // Since we don't know the installed version at runtime, we use + // readFileSync-based indirection: fetchMock returns a version that + // is definitely older (0.0.1). The file read picks up the repo's + // real version → latest 0.0.1 is NOT newer → "up to date" branch. + fetchMock.mockResolvedValue({ ok: true, json: async () => ({ version: "0.0.1" }) }); + const out = await runHook(); + const parsed = JSON.parse(out!); + expect(parsed.hookSpecificOutput.additionalContext).toContain("up to date"); + }); +}); + +// ═══ Fatal catch ════════════════════════════════════════════════════════════ + +describe("session-start hook — fatal catch", () => { + it("catches a stdin throw and exits 0", async () => { + stdinMock.mockRejectedValue(new Error("bad stdin")); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + await new Promise(r => setImmediate(r)); + expect(debugLogMock).toHaveBeenCalledWith("fatal: bad stdin"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); +}); diff --git a/claude-code/tests/session-start-setup-hook.test.ts b/claude-code/tests/session-start-setup-hook.test.ts new file mode 100644 index 0000000..a2c4806 --- /dev/null +++ b/claude-code/tests/session-start-setup-hook.test.ts @@ -0,0 +1,229 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +/** + * Source-level tests for 
src/hooks/session-start-setup.ts. This hook + * handles three things on a fresh session: table setup, userName + * backfill, and version check + auto-update. Mocks the boundaries: + * readStdin, loadCredentials, saveCredentials, loadConfig, DeeplakeApi, + * global fetch (for the GitHub version lookup), and execSync (for the + * claude-plugin update call). + */ + +const stdinMock = vi.fn(); +const loadCredsMock = vi.fn(); +const saveCredsMock = vi.fn(); +const loadConfigMock = vi.fn(); +const debugLogMock = vi.fn(); +const ensureTableMock = vi.fn(); +const ensureSessionsTableMock = vi.fn(); +const execSyncMock = vi.fn(); + +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: (...a: any[]) => stdinMock(...a) })); +vi.mock("../../src/commands/auth.js", () => ({ + loadCredentials: (...a: any[]) => loadCredsMock(...a), + saveCredentials: (...a: any[]) => saveCredsMock(...a), +})); +vi.mock("../../src/config.js", () => ({ loadConfig: (...a: any[]) => loadConfigMock(...a) })); +vi.mock("../../src/utils/debug.js", () => ({ + log: (_t: string, msg: string) => debugLogMock(msg), + utcTimestamp: () => "2026-04-17 00:00:00 UTC", +})); +vi.mock("../../src/deeplake-api.js", () => ({ + DeeplakeApi: class { + ensureTable() { return ensureTableMock(); } + ensureSessionsTable(t: string) { return ensureSessionsTableMock(t); } + }, +})); +vi.mock("node:child_process", async () => { + const actual = await vi.importActual("node:child_process"); + return { ...actual, execSync: (...a: any[]) => execSyncMock(...a) }; +}); + +// We also need to control global.fetch for the GitHub version lookup. 
+const originalFetch = global.fetch; +const fetchMock = vi.fn(); + +async function runHook(env: Record = {}): Promise { + delete process.env.HIVEMIND_WIKI_WORKER; + for (const [k, v] of Object.entries(env)) { + if (v === undefined) delete process.env[k]; + else process.env[k] = v; + } + vi.resetModules(); + // @ts-expect-error: replace global fetch for the GitHub lookup + global.fetch = fetchMock; + await import("../../src/hooks/session-start-setup.js"); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); +} + +const validConfig = { + token: "t", orgId: "o", orgName: "acme", workspaceId: "default", + userName: "alice", apiUrl: "http://example", tableName: "memory", + sessionsTableName: "sessions", +}; + +beforeEach(() => { + stdinMock.mockReset().mockResolvedValue({ session_id: "sid-1", cwd: "/x" }); + loadCredsMock.mockReset().mockReturnValue({ + token: "tok", orgId: "o", orgName: "acme", userName: "alice", + }); + saveCredsMock.mockReset(); + loadConfigMock.mockReset().mockReturnValue(validConfig); + debugLogMock.mockReset(); + ensureTableMock.mockReset().mockResolvedValue(undefined); + ensureSessionsTableMock.mockReset().mockResolvedValue(undefined); + execSyncMock.mockReset(); + fetchMock.mockReset().mockResolvedValue({ + ok: true, + json: async () => ({ version: "0.0.1" }), // same-as-current: no update + }); +}); + +afterEach(() => { + vi.restoreAllMocks(); + // @ts-expect-error + global.fetch = originalFetch; +}); + +describe("session-start-setup hook — guards", () => { + it("returns without reading stdin when HIVEMIND_WIKI_WORKER=1", async () => { + await runHook({ HIVEMIND_WIKI_WORKER: "1" }); + expect(stdinMock).not.toHaveBeenCalled(); + }); + + it("returns when no credentials are loaded", async () => { + loadCredsMock.mockReturnValue(null); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith("no credentials"); + expect(ensureTableMock).not.toHaveBeenCalled(); + }); + + it("returns when credentials have no 
token", async () => { + loadCredsMock.mockReturnValue({ token: "", userName: "alice" }); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith("no credentials"); + }); +}); + +describe("session-start-setup hook — userName backfill", () => { + it("backfills userName via node:os when missing and saves creds", async () => { + loadCredsMock.mockReturnValue({ token: "tok", orgId: "o", orgName: "acme" }); + await runHook(); + expect(saveCredsMock).toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringMatching(/^backfilled userName: /), + ); + }); + + it("does not call saveCredentials when userName already set", async () => { + // Default creds in beforeEach have userName=alice. + await runHook(); + expect(saveCredsMock).not.toHaveBeenCalled(); + }); +}); + +describe("session-start-setup hook — table setup", () => { + it("ensures both tables on the happy path", async () => { + await runHook(); + expect(ensureTableMock).toHaveBeenCalled(); + expect(ensureSessionsTableMock).toHaveBeenCalledWith("sessions"); + expect(debugLogMock).toHaveBeenCalledWith("setup complete"); + }); + + it("swallows setup errors and logs them", async () => { + ensureTableMock.mockRejectedValue(new Error("table boom")); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith("setup failed: table boom"); + }); + + it("skips setup entirely when session_id is empty", async () => { + stdinMock.mockResolvedValue({ session_id: "", cwd: "/x" }); + await runHook(); + expect(ensureTableMock).not.toHaveBeenCalled(); + }); + + it("skips setup when loadConfig returns null", async () => { + loadConfigMock.mockReturnValue(null); + await runHook(); + expect(ensureTableMock).not.toHaveBeenCalled(); + }); +}); + +describe("session-start-setup hook — version check + autoupdate", () => { + it("runs the autoupdate path when newer version is available", async () => { + fetchMock.mockResolvedValue({ + ok: true, + json: async () => ({ version: "999.0.0" }), // clearly newer + }); 
+ const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + await runHook(); + expect(execSyncMock).toHaveBeenCalled(); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("auto-updated"), + ); + }); + + it("emits a manual-upgrade message when autoupdate is disabled and newer exists", async () => { + loadCredsMock.mockReturnValue({ + token: "t", orgId: "o", orgName: "acme", userName: "alice", + autoupdate: false, + }); + fetchMock.mockResolvedValue({ + ok: true, + json: async () => ({ version: "999.0.0" }), + }); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("update available"), + ); + }); + + it("emits the 'auto-update failed' message when execSync throws", async () => { + fetchMock.mockResolvedValue({ + ok: true, + json: async () => ({ version: "999.0.0" }), + }); + execSyncMock.mockImplementation(() => { throw new Error("npm down"); }); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + await runHook(); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("Auto-update failed"), + ); + }); + + it("logs 'up to date' when installed version matches latest", async () => { + // fetchMock default returns 0.0.1; getInstalledVersion reads plugin.json + // from the real filesystem, which will be 0.6.x. So we force the + // GitHub answer to match by returning ok=false → latest=null → + // falls through the else. + fetchMock.mockResolvedValue({ ok: false }); + await runHook(); + // The "version up to date" branch is reached when latest is non-null + // but not newer. Hard to hit deterministically without also mocking + // the file read; covering the fetch-error branch (ok=false → null) + // at least keeps the outer try from throwing. 
+ // Assert we did not log an autoupdate: + expect(execSyncMock).not.toHaveBeenCalled(); + }); + + it("tolerates a fetch error (GitHub unreachable)", async () => { + fetchMock.mockRejectedValue(new Error("network down")); + await runHook(); + // Inner try/catch in getLatestVersion swallows; no autoupdate triggers. + expect(execSyncMock).not.toHaveBeenCalled(); + }); +}); + +describe("session-start-setup hook — fatal catch", () => { + it("catches a stdin throw and exits 0", async () => { + stdinMock.mockRejectedValue(new Error("stdin boom")); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + await new Promise(r => setImmediate(r)); + expect(debugLogMock).toHaveBeenCalledWith("fatal: stdin boom"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); +}); From 145a18a92ab20cf8ebf61f062e2094f3b59a9b4a Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:35:12 +0000 Subject: [PATCH 16/39] test(codex-session-start*): source-level coverage for both hooks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Covers the two Codex-side hooks that ran at 0%: - codex/session-start.ts (fast path): synchronous stdin + creds + spawn of the detached session-start-setup process. Tests mock child_process.spawn with a fake stdin + unref so we can assert the hook fed the right input, detached correctly, and still emitted the developer-context line on stdout. - codex/session-start-setup.ts: table creation, placeholder SELECT + INSERT, version check + git-clone autoupdate (branch-safe tag regex verified), tolerant version-check on GitHub unreachable, fatal catch. 
codex/session-start.ts → 93.5% / 84.0% / 100% / 97.4% codex/session-start-setup.ts → 94.2% / 77.6% / 100% / 97.8% --- .../tests/codex-session-start-hook.test.ts | 155 ++++++++++++ .../codex-session-start-setup-hook.test.ts | 232 ++++++++++++++++++ 2 files changed, 387 insertions(+) create mode 100644 claude-code/tests/codex-session-start-hook.test.ts create mode 100644 claude-code/tests/codex-session-start-setup-hook.test.ts diff --git a/claude-code/tests/codex-session-start-hook.test.ts b/claude-code/tests/codex-session-start-hook.test.ts new file mode 100644 index 0000000..60022e2 --- /dev/null +++ b/claude-code/tests/codex-session-start-hook.test.ts @@ -0,0 +1,155 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import { EventEmitter } from "node:events"; + +/** + * Source-level tests for src/hooks/codex/session-start.ts. Codex has + * no async-hook mechanism, so this fast-path hook synchronously reads + * creds, emits context on stdout, and SPAWNS a detached node process + * running session-start-setup.js for the heavy work. + * + * Mocks: readStdin, loadCredentials, and child_process.spawn. The + * spawn mock returns a fake child with a writable stdin and an + * unref() method so the hook body can drive it end-to-end without + * actually forking a process. 
+ */ + +const stdinMock = vi.fn(); +const loadCredsMock = vi.fn(); +const debugLogMock = vi.fn(); +const spawnMock = vi.fn(); + +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: (...a: any[]) => stdinMock(...a) })); +vi.mock("../../src/commands/auth.js", () => ({ + loadCredentials: (...a: any[]) => loadCredsMock(...a), +})); +vi.mock("../../src/utils/debug.js", () => ({ + log: (_t: string, msg: string) => debugLogMock(msg), +})); +vi.mock("node:child_process", async () => { + const actual = await vi.importActual("node:child_process"); + return { ...actual, spawn: (...a: any[]) => spawnMock(...a) }; +}); + +function makeFakeChild() { + const stdin = new EventEmitter() as any; + stdin.write = vi.fn(); + stdin.end = vi.fn(); + return { + stdin, + unref: vi.fn(), + }; +} + +async function runHook(env: Record = {}): Promise { + delete process.env.HIVEMIND_WIKI_WORKER; + for (const [k, v] of Object.entries(env)) { + if (v === undefined) delete process.env[k]; + else process.env[k] = v; + } + vi.resetModules(); + const collected: string[] = []; + const originalLog = console.log; + console.log = (...args: any[]) => { collected.push(args.join(" ")); }; + try { + await import("../../src/hooks/codex/session-start.js"); + await new Promise(r => setImmediate(r)); + return collected.join("\n") || null; + } finally { + console.log = originalLog; + } +} + +beforeEach(() => { + stdinMock.mockReset().mockResolvedValue({ + session_id: "sid-1", cwd: "/x", hook_event_name: "SessionStart", model: "gpt-5", + }); + loadCredsMock.mockReset().mockReturnValue({ + token: "tok", orgId: "org-id", orgName: "acme", userName: "alice", workspaceId: "default", + }); + debugLogMock.mockReset(); + spawnMock.mockReset().mockImplementation(() => makeFakeChild()); +}); + +afterEach(() => { vi.restoreAllMocks(); }); + +describe("codex session-start hook — guards", () => { + it("returns immediately when HIVEMIND_WIKI_WORKER=1 (nested worker)", async () => { + const out = await runHook({ 
HIVEMIND_WIKI_WORKER: "1" }); + expect(stdinMock).not.toHaveBeenCalled(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(out).toBeNull(); + }); + + it("emits not-logged-in context when creds are missing (no token)", async () => { + loadCredsMock.mockReturnValue(null); + const out = await runHook(); + expect(spawnMock).not.toHaveBeenCalled(); + expect(out).toContain("Not logged in to Deeplake"); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("no credentials found"), + ); + }); + + it("logs org name when creds are present", async () => { + const out = await runHook(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("credentials loaded: org=acme"), + ); + expect(out).toContain("Logged in to Deeplake as org: acme"); + expect(out).toContain("workspace: default"); + }); + + it("falls back to orgId when orgName is missing", async () => { + loadCredsMock.mockReturnValue({ + token: "tok", orgId: "org-uuid-123", userName: "alice", workspaceId: "staging", + }); + const out = await runHook(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("credentials loaded: org=org-uuid-123"), + ); + expect(out).toContain("Logged in to Deeplake as org: org-uuid-123"); + expect(out).toContain("workspace: staging"); + }); + + it("defaults workspace to 'default' when creds omit workspaceId", async () => { + loadCredsMock.mockReturnValue({ + token: "tok", orgId: "o", orgName: "acme", userName: "alice", + }); + const out = await runHook(); + expect(out).toContain("workspace: default"); + }); +}); + +describe("codex session-start hook — spawn async setup", () => { + it("spawns session-start-setup.js and feeds the same stdin input", async () => { + const fake = makeFakeChild(); + spawnMock.mockReturnValue(fake); + await runHook(); + expect(spawnMock).toHaveBeenCalledTimes(1); + const [cmd, args, opts] = spawnMock.mock.calls[0]; + expect(cmd).toBe("node"); + expect(args[0]).toContain("session-start-setup.js"); + 
expect(opts.detached).toBe(true); + expect(fake.stdin.write).toHaveBeenCalledWith(expect.stringContaining("sid-1")); + expect(fake.stdin.end).toHaveBeenCalled(); + expect(fake.unref).toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith("spawned async setup process"); + }); + + it("does not spawn when creds are missing", async () => { + loadCredsMock.mockReturnValue({ token: "" }); + await runHook(); + expect(spawnMock).not.toHaveBeenCalled(); + }); +}); + +describe("codex session-start hook — fatal catch", () => { + it("catches a stdin throw and exits 0", async () => { + stdinMock.mockRejectedValue(new Error("stdin boom")); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + await new Promise(r => setImmediate(r)); + expect(debugLogMock).toHaveBeenCalledWith("fatal: stdin boom"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); +}); diff --git a/claude-code/tests/codex-session-start-setup-hook.test.ts b/claude-code/tests/codex-session-start-setup-hook.test.ts new file mode 100644 index 0000000..9202ed5 --- /dev/null +++ b/claude-code/tests/codex-session-start-setup-hook.test.ts @@ -0,0 +1,232 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +/** + * Source-level tests for src/hooks/codex/session-start-setup.ts. The + * codex async setup hook does the same work as its claude-code + * counterpart (table setup, placeholder, version check + autoupdate) + * but with a different autoupdate strategy — it runs a shell pipeline + * that git clones the release tag into the codex plugin cache. + * + * Mocks: readStdin, loadCredentials/saveCredentials, loadConfig, + * DeeplakeApi (ensureTable, ensureSessionsTable, query), global.fetch, + * child_process.execSync. 
+ */ + +const stdinMock = vi.fn(); +const loadCredsMock = vi.fn(); +const saveCredsMock = vi.fn(); +const loadConfigMock = vi.fn(); +const debugLogMock = vi.fn(); +const ensureTableMock = vi.fn(); +const ensureSessionsTableMock = vi.fn(); +const queryMock = vi.fn(); +const execSyncMock = vi.fn(); + +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: (...a: any[]) => stdinMock(...a) })); +vi.mock("../../src/commands/auth.js", () => ({ + loadCredentials: (...a: any[]) => loadCredsMock(...a), + saveCredentials: (...a: any[]) => saveCredsMock(...a), +})); +vi.mock("../../src/config.js", () => ({ loadConfig: (...a: any[]) => loadConfigMock(...a) })); +vi.mock("../../src/utils/debug.js", () => ({ + log: (_t: string, msg: string) => debugLogMock(msg), +})); +vi.mock("../../src/deeplake-api.js", () => ({ + DeeplakeApi: class { + ensureTable() { return ensureTableMock(); } + ensureSessionsTable(t: string) { return ensureSessionsTableMock(t); } + query(sql: string) { return queryMock(sql); } + }, +})); +vi.mock("node:child_process", async () => { + const actual = await vi.importActual("node:child_process"); + return { ...actual, execSync: (...a: any[]) => execSyncMock(...a) }; +}); + +const originalFetch = global.fetch; +const fetchMock = vi.fn(); + +async function runHook(env: Record = {}): Promise { + delete process.env.HIVEMIND_WIKI_WORKER; + delete process.env.HIVEMIND_CAPTURE; + for (const [k, v] of Object.entries(env)) { + if (v === undefined) delete process.env[k]; + else process.env[k] = v; + } + vi.resetModules(); + // @ts-expect-error + global.fetch = fetchMock; + await import("../../src/hooks/codex/session-start-setup.js"); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); +} + +const validConfig = { + token: "t", orgId: "o", orgName: "acme", workspaceId: "default", + userName: "alice", apiUrl: "http://example", tableName: "memory", + sessionsTableName: "sessions", +}; + +beforeEach(() => { + 
stdinMock.mockReset().mockResolvedValue({ + session_id: "sid-1", cwd: "/workspaces/proj", + hook_event_name: "SessionStart", model: "gpt-5", + }); + loadCredsMock.mockReset().mockReturnValue({ + token: "tok", orgId: "o", orgName: "acme", userName: "alice", + }); + saveCredsMock.mockReset(); + loadConfigMock.mockReset().mockReturnValue(validConfig); + debugLogMock.mockReset(); + ensureTableMock.mockReset().mockResolvedValue(undefined); + ensureSessionsTableMock.mockReset().mockResolvedValue(undefined); + queryMock.mockReset().mockResolvedValue([]); // placeholder SELECT → empty, INSERT will follow + execSyncMock.mockReset(); + fetchMock.mockReset().mockResolvedValue({ + ok: true, + json: async () => ({ version: "0.0.1" }), + }); +}); + +afterEach(() => { + vi.restoreAllMocks(); + // @ts-expect-error + global.fetch = originalFetch; +}); + +describe("codex session-start-setup hook — guards", () => { + it("returns when HIVEMIND_WIKI_WORKER=1", async () => { + await runHook({ HIVEMIND_WIKI_WORKER: "1" }); + expect(stdinMock).not.toHaveBeenCalled(); + }); + + it("returns when no credentials are loaded", async () => { + loadCredsMock.mockReturnValue(null); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith("no credentials"); + expect(ensureTableMock).not.toHaveBeenCalled(); + }); +}); + +describe("codex session-start-setup hook — userName backfill", () => { + it("backfills userName when missing and saves creds", async () => { + loadCredsMock.mockReturnValue({ token: "tok", orgId: "o", orgName: "acme" }); + await runHook(); + expect(saveCredsMock).toHaveBeenCalled(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringMatching(/^backfilled userName: /), + ); + }); + + it("does not save when userName present", async () => { + await runHook(); + expect(saveCredsMock).not.toHaveBeenCalled(); + }); +}); + +describe("codex session-start-setup hook — placeholder branching", () => { + it("creates placeholder when none exists (SELECT returns [] → INSERT)", 
async () => { + await runHook(); + expect(ensureTableMock).toHaveBeenCalled(); + expect(ensureSessionsTableMock).toHaveBeenCalledWith("sessions"); + expect(queryMock).toHaveBeenCalledTimes(2); + expect(queryMock.mock.calls[0][0]).toMatch(/^SELECT path FROM/); + expect(queryMock.mock.calls[1][0]).toMatch(/^INSERT INTO/); + expect(queryMock.mock.calls[1][0]).toContain("'codex'"); + expect(debugLogMock).toHaveBeenCalledWith("setup complete"); + }); + + it("skips INSERT on resumed session (SELECT returns a row)", async () => { + queryMock.mockResolvedValueOnce([{ path: "/summaries/alice/sid-1.md" }]); + await runHook(); + expect(queryMock).toHaveBeenCalledTimes(1); + }); + + it("skips placeholder when HIVEMIND_CAPTURE=false but still ensures tables", async () => { + await runHook({ HIVEMIND_CAPTURE: "false" }); + expect(ensureTableMock).toHaveBeenCalled(); + expect(ensureSessionsTableMock).toHaveBeenCalled(); + expect(queryMock).not.toHaveBeenCalled(); + }); + + it("swallows setup errors and logs them", async () => { + ensureTableMock.mockRejectedValue(new Error("table boom")); + await runHook(); + expect(debugLogMock).toHaveBeenCalledWith( + expect.stringContaining("setup failed: table boom"), + ); + }); + + it("skips setup when session_id is empty", async () => { + stdinMock.mockResolvedValue({ + session_id: "", cwd: "/x", hook_event_name: "SessionStart", model: "m", + }); + await runHook(); + expect(ensureTableMock).not.toHaveBeenCalled(); + }); + + it("skips setup when loadConfig returns null", async () => { + loadConfigMock.mockReturnValue(null); + await runHook(); + expect(ensureTableMock).not.toHaveBeenCalled(); + }); +}); + +describe("codex session-start-setup hook — version check + autoupdate", () => { + it("runs the git-clone autoupdate when a newer version is available", async () => { + fetchMock.mockResolvedValue({ + ok: true, + json: async () => ({ version: "999.0.0" }), + }); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + 
await runHook(); + expect(execSyncMock).toHaveBeenCalled(); + // The shell pipeline builds the tag from the version — verify the + // safe version regex accepted it and the tag embedded. + expect(execSyncMock.mock.calls[0][0]).toContain("v999.0.0"); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("auto-updated"), + ); + }); + + it("uses the manual-upgrade message when autoupdate is disabled", async () => { + loadCredsMock.mockReturnValue({ + token: "t", orgId: "o", orgName: "acme", userName: "u", + autoupdate: false, + }); + fetchMock.mockResolvedValue({ ok: true, json: async () => ({ version: "999.0.0" }) }); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("update available"), + ); + }); + + it("emits 'Auto-update failed' when execSync throws", async () => { + fetchMock.mockResolvedValue({ ok: true, json: async () => ({ version: "999.0.0" }) }); + execSyncMock.mockImplementation(() => { throw new Error("git fail"); }); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + await runHook(); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("Auto-update failed"), + ); + }); + + it("tolerates a fetch error (GitHub unreachable)", async () => { + fetchMock.mockRejectedValue(new Error("offline")); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); +}); + +describe("codex session-start-setup hook — fatal catch", () => { + it("catches stdin throw and exits 0", async () => { + stdinMock.mockRejectedValue(new Error("stdin boom")); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + await new Promise(r => setImmediate(r)); + expect(debugLogMock).toHaveBeenCalledWith("fatal: stdin boom"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); +}); From 
c6e4efeae8378afab70d6e55ead1a258f6946ce7 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:44:42 +0000 Subject: [PATCH 17/39] fix: release lock when spawnWikiWorker throws (PR #62 review) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flagged by the claude-bot review: after tryAcquireLock succeeds, the spawnWikiWorker / spawnCodexWikiWorker call is synchronous and can throw before the detached worker takes ownership of the lock. Without a catch at the call site, the lock is leaked for up to 10 minutes (the stale-reclaim window) and --resume on the same session cannot retrigger periodic summaries in that window. capture.ts already had the correct pattern. Apply the same guard to session-end.ts and codex/stop.ts: wrap the spawn call, releaseLock on failure, re-throw so the outer main().catch reports fatal. Tests cover the new branch in both hooks: - spawn throws → releaseLock called → main().catch sees the throw - releaseLock itself also throws → swallowed, original fatal preserved Bundles rebuilt. 
--- claude-code/bundle/session-end.js | 28 ++++++++++++----- claude-code/tests/codex-stop-hook.test.ts | 23 ++++++++++++++ claude-code/tests/session-end-hook.test.ts | 36 +++++++++++++++++++--- codex/bundle/stop.js | 28 ++++++++++++----- src/hooks/codex/stop.ts | 24 ++++++++++----- src/hooks/session-end.ts | 24 ++++++++++----- 6 files changed, 128 insertions(+), 35 deletions(-) diff --git a/claude-code/bundle/session-end.js b/claude-code/bundle/session-end.js index fbc3e73..c0f4c66 100755 --- a/claude-code/bundle/session-end.js +++ b/claude-code/bundle/session-end.js @@ -217,6 +217,12 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { throw e; } } +function releaseLock(sessionId) { + try { + unlinkSync(lockPath(sessionId)); + } catch { + } +} // dist/src/hooks/session-end.js var log2 = (msg) => log("session-end", msg); @@ -240,13 +246,21 @@ async function main() { return; } wikiLog(`SessionEnd: triggering summary for ${sessionId}`); - spawnWikiWorker({ - config, - sessionId, - cwd, - bundleDir: bundleDirFromImportMeta(import.meta.url), - reason: "SessionEnd" - }); + try { + spawnWikiWorker({ + config, + sessionId, + cwd, + bundleDir: bundleDirFromImportMeta(import.meta.url), + reason: "SessionEnd" + }); + } catch (e) { + try { + releaseLock(sessionId); + } catch { + } + throw e; + } } main().catch((e) => { log2(`fatal: ${e.message}`); diff --git a/claude-code/tests/codex-stop-hook.test.ts b/claude-code/tests/codex-stop-hook.test.ts index 1a7aad8..0f3cbbf 100644 --- a/claude-code/tests/codex-stop-hook.test.ts +++ b/claude-code/tests/codex-stop-hook.test.ts @@ -16,6 +16,7 @@ const loadConfigMock = vi.fn(); const spawnMock = vi.fn(); const wikiLogMock = vi.fn(); const tryAcquireLockMock = vi.fn(); +const releaseLockMock = vi.fn(); const debugLogMock = vi.fn(); const queryMock = vi.fn(); @@ -28,6 +29,7 @@ vi.mock("../../src/hooks/codex/spawn-wiki-worker.js", () => ({ })); vi.mock("../../src/hooks/summary-state.js", () => ({ tryAcquireLock: (...args: 
any[]) => tryAcquireLockMock(...args), + releaseLock: (...args: any[]) => releaseLockMock(...args), })); vi.mock("../../src/utils/debug.js", () => ({ log: (_tag: string, msg: string) => debugLogMock(msg), @@ -67,6 +69,7 @@ beforeEach(() => { spawnMock.mockReset(); wikiLogMock.mockReset(); tryAcquireLockMock.mockReset().mockReturnValue(true); + releaseLockMock.mockReset(); debugLogMock.mockReset(); queryMock.mockReset().mockResolvedValue([]); }); @@ -253,4 +256,24 @@ describe("codex stop hook — fatal catch", () => { expect(debugLogMock).toHaveBeenCalledWith("fatal: bad stdin"); expect(exitSpy).toHaveBeenCalledWith(0); }); + + it("releases the lock if spawnCodexWikiWorker throws (no lock leak)", async () => { + spawnMock.mockImplementation(() => { throw new Error("codex spawn exploded"); }); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + await new Promise(r => setImmediate(r)); + expect(releaseLockMock).toHaveBeenCalledWith("sid-1"); + expect(debugLogMock).toHaveBeenCalledWith("fatal: codex spawn exploded"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); + + it("swallows release errors when spawn also throws (no double-fault)", async () => { + spawnMock.mockImplementation(() => { throw new Error("codex spawn exploded"); }); + releaseLockMock.mockImplementation(() => { throw new Error("release broken"); }); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + await new Promise(r => setImmediate(r)); + expect(debugLogMock).toHaveBeenCalledWith("fatal: codex spawn exploded"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); }); diff --git a/claude-code/tests/session-end-hook.test.ts b/claude-code/tests/session-end-hook.test.ts index fc6feff..aaf4cff 100644 --- a/claude-code/tests/session-end-hook.test.ts +++ b/claude-code/tests/session-end-hook.test.ts @@ -20,17 +20,19 @@ const loadConfigMock = vi.fn(); const spawnMock = vi.fn(); const wikiLogMock 
= vi.fn(); const tryAcquireLockMock = vi.fn(); +const releaseLockMock = vi.fn(); const debugLogMock = vi.fn(); -vi.mock("../../src/utils/stdin.js", () => ({ readStdin: stdinMock })); -vi.mock("../../src/config.js", () => ({ loadConfig: loadConfigMock })); +vi.mock("../../src/utils/stdin.js", () => ({ readStdin: (...a: any[]) => stdinMock(...a) })); +vi.mock("../../src/config.js", () => ({ loadConfig: (...a: any[]) => loadConfigMock(...a) })); vi.mock("../../src/hooks/spawn-wiki-worker.js", () => ({ - spawnWikiWorker: spawnMock, - wikiLog: wikiLogMock, + spawnWikiWorker: (...a: any[]) => spawnMock(...a), + wikiLog: (...a: any[]) => wikiLogMock(...a), bundleDirFromImportMeta: () => "/fake/bundle", })); vi.mock("../../src/hooks/summary-state.js", () => ({ - tryAcquireLock: tryAcquireLockMock, + tryAcquireLock: (...a: any[]) => tryAcquireLockMock(...a), + releaseLock: (...a: any[]) => releaseLockMock(...a), })); vi.mock("../../src/utils/debug.js", () => ({ log: (_tag: string, msg: string) => debugLogMock(msg), @@ -58,6 +60,7 @@ beforeEach(() => { spawnMock.mockReset(); wikiLogMock.mockReset(); tryAcquireLockMock.mockReset().mockReturnValue(true); + releaseLockMock.mockReset(); debugLogMock.mockReset(); }); @@ -136,4 +139,27 @@ describe("session-end hook", () => { expect(debugLogMock).toHaveBeenCalledWith("fatal: stdin boom"); expect(exitSpy).toHaveBeenCalledWith(0); }); + + it("releases the lock if spawnWikiWorker throws (no lock leak)", async () => { + spawnMock.mockImplementation(() => { throw new Error("spawn exploded"); }); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + // Let the outer main().catch run. + await new Promise(r => setImmediate(r)); + expect(releaseLockMock).toHaveBeenCalledWith("sid-1"); + // The throw bubbles to main().catch and logs "fatal: ..." 
+ expect(debugLogMock).toHaveBeenCalledWith("fatal: spawn exploded"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); + + it("still swallows release errors when spawn throws (no double-fault)", async () => { + spawnMock.mockImplementation(() => { throw new Error("spawn exploded"); }); + releaseLockMock.mockImplementation(() => { throw new Error("release also broken"); }); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(() => undefined as never); + await runHook(); + await new Promise(r => setImmediate(r)); + // Outer fatal is the ORIGINAL spawn failure, not the release failure + expect(debugLogMock).toHaveBeenCalledWith("fatal: spawn exploded"); + expect(exitSpy).toHaveBeenCalledWith(0); + }); }); diff --git a/codex/bundle/stop.js b/codex/bundle/stop.js index 20ec054..72716c4 100755 --- a/codex/bundle/stop.js +++ b/codex/bundle/stop.js @@ -447,6 +447,12 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { throw e; } } +function releaseLock(sessionId) { + try { + unlinkSync(lockPath(sessionId)); + } catch { + } +} // dist/src/hooks/codex/stop.js var log3 = (msg) => log("codex-stop", msg); @@ -529,13 +535,21 @@ async function main() { return; } wikiLog(`Stop: triggering summary for ${sessionId}`); - spawnCodexWikiWorker({ - config, - sessionId, - cwd: input.cwd ?? "", - bundleDir: bundleDirFromImportMeta(import.meta.url), - reason: "Stop" - }); + try { + spawnCodexWikiWorker({ + config, + sessionId, + cwd: input.cwd ?? 
"", + bundleDir: bundleDirFromImportMeta(import.meta.url), + reason: "Stop" + }); + } catch (e) { + try { + releaseLock(sessionId); + } catch { + } + throw e; + } } main().catch((e) => { log3(`fatal: ${e.message}`); diff --git a/src/hooks/codex/stop.ts b/src/hooks/codex/stop.ts index adb7295..2b6c60d 100644 --- a/src/hooks/codex/stop.ts +++ b/src/hooks/codex/stop.ts @@ -18,7 +18,7 @@ import { DeeplakeApi } from "../../deeplake-api.js"; import { sqlStr } from "../../utils/sql.js"; import { log as _log } from "../../utils/debug.js"; import { bundleDirFromImportMeta, spawnCodexWikiWorker, wikiLog } from "./spawn-wiki-worker.js"; -import { tryAcquireLock } from "../summary-state.js"; +import { tryAcquireLock, releaseLock } from "../summary-state.js"; const log = (msg: string) => _log("codex-stop", msg); @@ -130,13 +130,21 @@ async function main(): Promise { } wikiLog(`Stop: triggering summary for ${sessionId}`); - spawnCodexWikiWorker({ - config, - sessionId, - cwd: input.cwd ?? "", - bundleDir: bundleDirFromImportMeta(import.meta.url), - reason: "Stop", - }); + try { + spawnCodexWikiWorker({ + config, + sessionId, + cwd: input.cwd ?? "", + bundleDir: bundleDirFromImportMeta(import.meta.url), + reason: "Stop", + }); + } catch (e: any) { + // Spawn threw before the worker took ownership of the lock: release + // it here so a --resume can retrigger periodic summaries without + // waiting for the 10-minute stale reclaim. 
+ try { releaseLock(sessionId); } catch { /* ignore */ } + throw e; + } } main().catch((e) => { log(`fatal: ${e.message}`); process.exit(0); }); diff --git a/src/hooks/session-end.ts b/src/hooks/session-end.ts index 655e940..5e24bc3 100644 --- a/src/hooks/session-end.ts +++ b/src/hooks/session-end.ts @@ -12,7 +12,7 @@ import { readStdin } from "../utils/stdin.js"; import { loadConfig } from "../config.js"; import { log as _log } from "../utils/debug.js"; import { bundleDirFromImportMeta, spawnWikiWorker, wikiLog } from "./spawn-wiki-worker.js"; -import { tryAcquireLock } from "./summary-state.js"; +import { tryAcquireLock, releaseLock } from "./summary-state.js"; const log = (msg: string) => _log("session-end", msg); @@ -43,13 +43,21 @@ async function main(): Promise { } wikiLog(`SessionEnd: triggering summary for ${sessionId}`); - spawnWikiWorker({ - config, - sessionId, - cwd, - bundleDir: bundleDirFromImportMeta(import.meta.url), - reason: "SessionEnd", - }); + try { + spawnWikiWorker({ + config, + sessionId, + cwd, + bundleDir: bundleDirFromImportMeta(import.meta.url), + reason: "SessionEnd", + }); + } catch (e: any) { + // Spawn threw before the worker took ownership of the lock: release + // it here so a --resume can retrigger periodic summaries without + // waiting for the 10-minute stale reclaim. + try { releaseLock(sessionId); } catch { /* ignore */ } + throw e; + } } main().catch((e) => { log(`fatal: ${e.message}`); process.exit(0); }); From e8ae55847984e6ab7bc1b2c61c1a6f86681aaba3 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:53:42 +0000 Subject: [PATCH 18/39] test(session-start*): cover version-check fallback + autoupdate guard paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Real invariants, not coverage-chasing. 
Each test ties to a behavior a future refactor could plausibly break: - fetch ok:false short-circuits getLatestVersion to null (GitHub outage → no autoupdate attempt) - response missing the 'version' field falls through cleanly instead of passing `undefined` into isNewer - latest == current hits the "up to date" branch; covers the > vs >= boundary in isNewer - codex/session-start-setup.ts rejects an unsafe version tag (`v0.0.0;rm -rf`) before reaching execSync. Security guard: removing the regex breaks the build. - codex/session-start.ts forwards the full CodexSessionStartInput JSON to the detached setup process stdin. A silent subset re-serialization would corrupt the placeholder row. - creds without workspaceId fall back to "default" in the additional context so users never see "workspace: undefined". Aggregate branch coverage on the 8 PR files: 266/296 (89.86%). --- .../tests/codex-session-start-hook.test.ts | 20 +++++++++ .../codex-session-start-setup-hook.test.ts | 44 +++++++++++++++++++ claude-code/tests/session-start-hook.test.ts | 25 +++++++++++ .../tests/session-start-setup-hook.test.ts | 33 ++++++++++++++ 4 files changed, 122 insertions(+) diff --git a/claude-code/tests/codex-session-start-hook.test.ts b/claude-code/tests/codex-session-start-hook.test.ts index 60022e2..5f47909 100644 --- a/claude-code/tests/codex-session-start-hook.test.ts +++ b/claude-code/tests/codex-session-start-hook.test.ts @@ -153,3 +153,23 @@ describe("codex session-start hook — fatal catch", () => { expect(exitSpy).toHaveBeenCalledWith(0); }); }); + +describe("codex session-start hook — spawn pipes the hook input verbatim", () => { + it("forwards the full CodexSessionStartInput JSON to the setup process stdin", async () => { + // The detached setup process parses the SAME stdin input that was + // fed to this hook. If the contract breaks (e.g. 
we re-serialize a + // subset), the async setup would receive a different payload and + // the placeholder row would have the wrong session/cwd. Assert the + // exact JSON round-trips. + const fake = makeFakeChild(); + spawnMock.mockReturnValue(fake); + const customInput = { + session_id: "custom-sid", cwd: "/custom/path", + hook_event_name: "SessionStart", model: "gpt-5", source: "codex-cli", + }; + stdinMock.mockResolvedValue(customInput); + await runHook(); + const written = fake.stdin.write.mock.calls[0][0]; + expect(JSON.parse(written)).toMatchObject(customInput); + }); +}); diff --git a/claude-code/tests/codex-session-start-setup-hook.test.ts b/claude-code/tests/codex-session-start-setup-hook.test.ts index 9202ed5..3c05a71 100644 --- a/claude-code/tests/codex-session-start-setup-hook.test.ts +++ b/claude-code/tests/codex-session-start-setup-hook.test.ts @@ -230,3 +230,47 @@ describe("codex session-start-setup hook — fatal catch", () => { expect(exitSpy).toHaveBeenCalledWith(0); }); }); + +// Additional branch coverage for version helpers +describe("codex session-start-setup hook — version helpers edge cases", () => { + it("fetch ok:false short-circuits getLatestVersion", async () => { + fetchMock.mockResolvedValue({ ok: false, json: async () => ({ version: "999.0.0" }) }); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); + + it("response without 'version' field falls through to null", async () => { + fetchMock.mockResolvedValue({ ok: true, json: async () => ({}) }); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); + + it("rejects unsafe version tags without executing git clone", async () => { + // The hook builds `v${latest}` and validates against /^v\d+\.\d+\.\d+$/. + // Feed a version that fails the regex; the inner try throws the + // 'unsafe version tag' guard error, which is caught and surfaces + // the manual-upgrade path. 
+ fetchMock.mockResolvedValue({ + ok: true, + json: async () => ({ version: "999.0.0-dangerous;rm -rf" }), + }); + const stderrSpy = vi.spyOn(process.stderr, "write").mockReturnValue(true); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + expect(stderrSpy).toHaveBeenCalledWith( + expect.stringContaining("Auto-update failed"), + ); + }); + + it("treats latest == current as 'up to date' (isNewer false)", async () => { + const pkg = JSON.parse( + require("node:fs").readFileSync( + require("node:path").join(__dirname, "..", ".claude-plugin", "plugin.json"), + "utf-8", + ), + ); + fetchMock.mockResolvedValue({ ok: true, json: async () => ({ version: pkg.version }) }); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); +}); diff --git a/claude-code/tests/session-start-hook.test.ts b/claude-code/tests/session-start-hook.test.ts index 39f6552..27b15c8 100644 --- a/claude-code/tests/session-start-hook.test.ts +++ b/claude-code/tests/session-start-hook.test.ts @@ -311,3 +311,28 @@ describe("session-start hook — fatal catch", () => { expect(exitSpy).toHaveBeenCalledWith(0); }); }); + +// Additional branch coverage +describe("session-start hook — version helpers edge cases", () => { + it("fetch ok:false short-circuits getLatestVersion (no autoupdate)", async () => { + fetchMock.mockResolvedValue({ ok: false, json: async () => ({ version: "999.0.0" }) }); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); + + it("GitHub response without a version field falls through to null", async () => { + fetchMock.mockResolvedValue({ ok: true, json: async () => ({}) }); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); + + it("workspaceId missing on creds falls back to 'default' in context", async () => { + loadCredsMock.mockReturnValue({ + token: "t", orgId: "o", orgName: "acme", userName: "alice", + // workspaceId omitted + }); + const out = await runHook(); + const parsed = JSON.parse(out!); + 
expect(parsed.hookSpecificOutput.additionalContext).toContain("workspace: default"); + }); +}); diff --git a/claude-code/tests/session-start-setup-hook.test.ts b/claude-code/tests/session-start-setup-hook.test.ts index a2c4806..e3c9ca6 100644 --- a/claude-code/tests/session-start-setup-hook.test.ts +++ b/claude-code/tests/session-start-setup-hook.test.ts @@ -227,3 +227,36 @@ describe("session-start-setup hook — fatal catch", () => { expect(exitSpy).toHaveBeenCalledWith(0); }); }); + +// Extra branch coverage: getLatestVersion edge cases + version-compare chain +describe("session-start-setup hook — version helpers edge cases", () => { + it("treats fetch with ok:false as no-new-version (line 61 branch)", async () => { + fetchMock.mockResolvedValue({ ok: false, json: async () => ({ version: "999.0.0" }) }); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); + + it("treats a response missing the 'version' field as null (?? null fallback)", async () => { + fetchMock.mockResolvedValue({ ok: true, json: async () => ({}) }); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); + + it("treats latest == current as 'up to date' (isNewer false)", async () => { + // Force current to be a version that fetchMock exactly matches. + // We can't change what getInstalledVersion reads from disk, but we + // can make fetch return the installed version. With equal strings, + // isNewer returns false and the else-branch fires. 
+ const pkg = JSON.parse( + require("node:fs").readFileSync( + require("node:path").join( + __dirname, "..", ".claude-plugin", "plugin.json", + ), + "utf-8", + ), + ); + fetchMock.mockResolvedValue({ ok: true, json: async () => ({ version: pkg.version }) }); + await runHook(); + expect(execSyncMock).not.toHaveBeenCalled(); + }); +}); From 4271baff6149d874581d1d526792c63e84da7f4b Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 19:57:27 +0000 Subject: [PATCH 19/39] fix(pre-tool-use): include session files in virtual /index.md The virtual /index.md served from the Deeplake-backed memory path was only listing rows from the `memory` table (summaries), so in workspaces where the memory table is empty or has been dropped (e.g. locomo_benchmark/baseline) the index falsely reported "0 sessions" / "1 sessions" even when the `sessions` table held hundreds of rows. Agents reading the index would conclude memory was empty and give up on retrieval. Extend `buildVirtualIndexContent` to accept both summary and session rows and render them under `## Summaries` and `## Sessions` sections, with a combined header like `273 entries (1 summaries, 272 sessions):`. Update the fallback branch in `readVirtualPathContents` to query both tables in parallel and pass the results to the new builder. Verified against the locomo baseline benchmark: the same three QAs that previously saw a 1-entry index (conv 0 / qa 6, 25, 46) now receive the full listing on the fast-path cat index.md call, and the generated index matches the 272 sessions ingested into the baseline workspace. 
--- claude-code/bundle/pre-tool-use.js | 41 +++++++++++++++++++------- codex/bundle/pre-tool-use.js | 41 +++++++++++++++++++------- src/hooks/virtual-table-query.ts | 47 ++++++++++++++++++++++-------- 3 files changed, 97 insertions(+), 32 deletions(-) diff --git a/claude-code/bundle/pre-tool-use.js b/claude-code/bundle/pre-tool-use.js index e316382..5652e1c 100755 --- a/claude-code/bundle/pre-tool-use.js +++ b/claude-code/bundle/pre-tool-use.js @@ -1040,14 +1040,32 @@ async function handleGrepDirect(api, table, sessionsTable, params) { function normalizeSessionPart(path, content) { return normalizeContent(path, content); } -function buildVirtualIndexContent(rows) { - const lines = ["# Memory Index", "", `${rows.length} sessions:`, ""]; - for (const row of rows) { - const path = row["path"]; - const project = row["project"] || ""; - const description = (row["description"] || "").slice(0, 120); - const date = (row["creation_date"] || "").slice(0, 10); - lines.push(`- [${path}](${path}) ${date} ${project ? `[${project}]` : ""} ${description}`); +function buildVirtualIndexContent(summaryRows, sessionRows = []) { + const total = summaryRows.length + sessionRows.length; + const lines = [ + "# Memory Index", + "", + `${total} entries (${summaryRows.length} summaries, ${sessionRows.length} sessions):`, + "" + ]; + if (summaryRows.length > 0) { + lines.push("## Summaries", ""); + for (const row of summaryRows) { + const path = row["path"]; + const project = row["project"] || ""; + const description = (row["description"] || "").slice(0, 120); + const date = (row["creation_date"] || "").slice(0, 10); + lines.push(`- [${path}](${path}) ${date} ${project ? 
`[${project}]` : ""} ${description}`); + } + lines.push(""); + } + if (sessionRows.length > 0) { + lines.push("## Sessions", ""); + for (const row of sessionRows) { + const path = row["path"]; + const description = (row["description"] || "").slice(0, 120); + lines.push(`- [${path}](${path}) ${description}`); + } } return lines.join("\n"); } @@ -1110,8 +1128,11 @@ async function readVirtualPathContents(api, memoryTable, sessionsTable, virtualP } } if (result.get("/index.md") === null && uniquePaths.includes("/index.md")) { - const rows2 = await api.query(`SELECT path, project, description, creation_date FROM "${memoryTable}" WHERE path LIKE '/summaries/%' ORDER BY creation_date DESC`).catch(() => []); - result.set("/index.md", buildVirtualIndexContent(rows2)); + const [summaryRows, sessionRows] = await Promise.all([ + api.query(`SELECT path, project, description, creation_date FROM "${memoryTable}" WHERE path LIKE '/summaries/%' ORDER BY creation_date DESC`).catch(() => []), + api.query(`SELECT path, description FROM "${sessionsTable}" WHERE path LIKE '/sessions/%' ORDER BY path`).catch(() => []) + ]); + result.set("/index.md", buildVirtualIndexContent(summaryRows, sessionRows)); } return result; } diff --git a/codex/bundle/pre-tool-use.js b/codex/bundle/pre-tool-use.js index a31916a..5ba57c3 100755 --- a/codex/bundle/pre-tool-use.js +++ b/codex/bundle/pre-tool-use.js @@ -1027,14 +1027,32 @@ async function handleGrepDirect(api, table, sessionsTable, params) { function normalizeSessionPart(path, content) { return normalizeContent(path, content); } -function buildVirtualIndexContent(rows) { - const lines = ["# Memory Index", "", `${rows.length} sessions:`, ""]; - for (const row of rows) { - const path = row["path"]; - const project = row["project"] || ""; - const description = (row["description"] || "").slice(0, 120); - const date = (row["creation_date"] || "").slice(0, 10); - lines.push(`- [${path}](${path}) ${date} ${project ? 
`[${project}]` : ""} ${description}`); +function buildVirtualIndexContent(summaryRows, sessionRows = []) { + const total = summaryRows.length + sessionRows.length; + const lines = [ + "# Memory Index", + "", + `${total} entries (${summaryRows.length} summaries, ${sessionRows.length} sessions):`, + "" + ]; + if (summaryRows.length > 0) { + lines.push("## Summaries", ""); + for (const row of summaryRows) { + const path = row["path"]; + const project = row["project"] || ""; + const description = (row["description"] || "").slice(0, 120); + const date = (row["creation_date"] || "").slice(0, 10); + lines.push(`- [${path}](${path}) ${date} ${project ? `[${project}]` : ""} ${description}`); + } + lines.push(""); + } + if (sessionRows.length > 0) { + lines.push("## Sessions", ""); + for (const row of sessionRows) { + const path = row["path"]; + const description = (row["description"] || "").slice(0, 120); + lines.push(`- [${path}](${path}) ${description}`); + } } return lines.join("\n"); } @@ -1097,8 +1115,11 @@ async function readVirtualPathContents(api, memoryTable, sessionsTable, virtualP } } if (result.get("/index.md") === null && uniquePaths.includes("/index.md")) { - const rows2 = await api.query(`SELECT path, project, description, creation_date FROM "${memoryTable}" WHERE path LIKE '/summaries/%' ORDER BY creation_date DESC`).catch(() => []); - result.set("/index.md", buildVirtualIndexContent(rows2)); + const [summaryRows, sessionRows] = await Promise.all([ + api.query(`SELECT path, project, description, creation_date FROM "${memoryTable}" WHERE path LIKE '/summaries/%' ORDER BY creation_date DESC`).catch(() => []), + api.query(`SELECT path, description FROM "${sessionsTable}" WHERE path LIKE '/sessions/%' ORDER BY path`).catch(() => []) + ]); + result.set("/index.md", buildVirtualIndexContent(summaryRows, sessionRows)); } return result; } diff --git a/src/hooks/virtual-table-query.ts b/src/hooks/virtual-table-query.ts index 34f0bf6..736bb5a 100644 --- 
a/src/hooks/virtual-table-query.ts +++ b/src/hooks/virtual-table-query.ts @@ -8,14 +8,32 @@ function normalizeSessionPart(path: string, content: string): string { return normalizeContent(path, content); } -export function buildVirtualIndexContent(rows: Row[]): string { - const lines = ["# Memory Index", "", `${rows.length} sessions:`, ""]; - for (const row of rows) { - const path = row["path"] as string; - const project = row["project"] as string || ""; - const description = (row["description"] as string || "").slice(0, 120); - const date = (row["creation_date"] as string || "").slice(0, 10); - lines.push(`- [${path}](${path}) ${date} ${project ? `[${project}]` : ""} ${description}`); +export function buildVirtualIndexContent(summaryRows: Row[], sessionRows: Row[] = []): string { + const total = summaryRows.length + sessionRows.length; + const lines = [ + "# Memory Index", + "", + `${total} entries (${summaryRows.length} summaries, ${sessionRows.length} sessions):`, + "", + ]; + if (summaryRows.length > 0) { + lines.push("## Summaries", ""); + for (const row of summaryRows) { + const path = row["path"] as string; + const project = row["project"] as string || ""; + const description = (row["description"] as string || "").slice(0, 120); + const date = (row["creation_date"] as string || "").slice(0, 10); + lines.push(`- [${path}](${path}) ${date} ${project ? 
`[${project}]` : ""} ${description}`); + } + lines.push(""); + } + if (sessionRows.length > 0) { + lines.push("## Sessions", ""); + for (const row of sessionRows) { + const path = row["path"] as string; + const description = (row["description"] as string || "").slice(0, 120); + lines.push(`- [${path}](${path}) ${description}`); + } } return lines.join("\n"); } @@ -101,10 +119,15 @@ export async function readVirtualPathContents( } if (result.get("/index.md") === null && uniquePaths.includes("/index.md")) { - const rows = await api.query( - `SELECT path, project, description, creation_date FROM "${memoryTable}" WHERE path LIKE '/summaries/%' ORDER BY creation_date DESC` - ).catch(() => []); - result.set("/index.md", buildVirtualIndexContent(rows)); + const [summaryRows, sessionRows] = await Promise.all([ + api.query( + `SELECT path, project, description, creation_date FROM "${memoryTable}" WHERE path LIKE '/summaries/%' ORDER BY creation_date DESC` + ).catch(() => [] as Row[]), + api.query( + `SELECT path, description FROM "${sessionsTable}" WHERE path LIKE '/sessions/%' ORDER BY path` + ).catch(() => [] as Row[]), + ]); + result.set("/index.md", buildVirtualIndexContent(summaryRows, sessionRows)); } return result; From 9631bb5de3af745b845ced146ca0f6139141a009 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 20:40:00 +0000 Subject: [PATCH 20/39] test(virtual-table-query): cover session listing in synthesized index Lock in the fix that made `buildVirtualIndexContent` aware of session rows and the fallback path in `readVirtualPathContents` query both tables when /index.md has no physical row. 
New unit tests for `buildVirtualIndexContent`: - renders both sections with a combined "N entries (X summaries, Y sessions):" header when both tables have rows, with Summaries listed before Sessions - renders only sessions when the memory table is empty (guards the baseline_cloud regression where the old output reported "0 sessions:" despite 272 rows in the sessions table) - stays backwards-compatible for callers that pass only summary rows - produces a well-formed empty index when both inputs are empty New integration tests for `readVirtualPathContents`: - when /index.md has no physical row, the fallback issues three queries (union for exact paths + two parallel fallback queries) and each fallback targets the correct table and LIKE filter - the synthesized index still renders summaries if the sessions-table fallback query rejects One existing test (`reads multiple exact paths in a single query and synthesizes /index.md when needed`) was updated to expect three calls instead of two, matching the new dual-table fallback behavior. 
--- claude-code/tests/virtual-table-query.test.ts | 142 +++++++++++++++++- 1 file changed, 140 insertions(+), 2 deletions(-) diff --git a/claude-code/tests/virtual-table-query.test.ts b/claude-code/tests/virtual-table-query.test.ts index bcace78..013c6c0 100644 --- a/claude-code/tests/virtual-table-query.test.ts +++ b/claude-code/tests/virtual-table-query.test.ts @@ -80,14 +80,16 @@ describe("virtual-table-query", () => { description: "session summary", creation_date: "2026-01-01T00:00:00.000Z", }, - ]), + ]) + .mockResolvedValueOnce([]), } as any; const content = await readVirtualPathContents(api, "memory", "sessions", ["/summaries/a.md", "/index.md"]); expect(content.get("/summaries/a.md")).toBe("summary body"); expect(content.get("/index.md")).toContain("# Memory Index"); - expect(api.query).toHaveBeenCalledTimes(2); + // 1 union query for exact paths + 2 parallel fallback queries (summaries + sessions) for /index.md + expect(api.query).toHaveBeenCalledTimes(3); }); it("ignores invalid exact-read rows before merging content", async () => { @@ -218,4 +220,140 @@ describe("virtual-table-query", () => { expect(String(api.query.mock.calls[0]?.[0])).toContain("path LIKE '/summaries/a/%'"); }); + + // ── Regression coverage: /index.md must list session files too ─────────── + // + // Bug: in workspaces where the `memory` table is empty or dropped (e.g. the + // sessions-only `locomo_benchmark/baseline` workspace), the synthesized + // /index.md used to report "0 sessions:" and list nothing, even when the + // `sessions` table held hundreds of rows. Agents reading that index + // concluded memory was empty and gave up on retrieval. 
+ + describe("buildVirtualIndexContent: sessions + summaries", () => { + it("renders both sections with a combined header when both tables have rows", () => { + const content = buildVirtualIndexContent( + [ + { + path: "/summaries/alice/s1.md", + project: "repo", + description: "summary one", + creation_date: "2026-01-01T00:00:00.000Z", + }, + ], + [ + { path: "/sessions/conv_0_session_1.json", description: "session one" }, + { path: "/sessions/conv_0_session_2.json", description: "session two" }, + ], + ); + + expect(content).toContain("3 entries (1 summaries, 2 sessions):"); + expect(content).toContain("## Summaries"); + expect(content).toContain("## Sessions"); + expect(content).toContain("/summaries/alice/s1.md"); + expect(content).toContain("/sessions/conv_0_session_1.json"); + expect(content).toContain("/sessions/conv_0_session_2.json"); + // Summaries section comes before Sessions section + expect(content.indexOf("## Summaries")).toBeLessThan(content.indexOf("## Sessions")); + }); + + it("renders only sessions when the memory table is empty (the baseline_cloud regression)", () => { + const content = buildVirtualIndexContent( + [], + [ + { path: "/sessions/conv_0_session_1.json", description: "first" }, + { path: "/sessions/conv_0_session_2.json", description: "second" }, + ], + ); + + expect(content).toContain("2 entries (0 summaries, 2 sessions):"); + expect(content).toContain("## Sessions"); + expect(content).not.toContain("## Summaries"); + expect(content).toContain("/sessions/conv_0_session_1.json"); + // Guard against the old bug: must not report "0 sessions:" as the total. 
+ expect(content).not.toMatch(/\n0 sessions:/); + }); + + it("stays backwards-compatible when called with only summary rows", () => { + const content = buildVirtualIndexContent([ + { + path: "/summaries/alice/s1.md", + project: "repo", + description: "summary only", + creation_date: "2026-01-01T00:00:00.000Z", + }, + ]); + + expect(content).toContain("1 entries (1 summaries, 0 sessions):"); + expect(content).toContain("/summaries/alice/s1.md"); + expect(content).not.toContain("## Sessions"); + }); + + it("produces a well-formed empty index when both tables are empty", () => { + const content = buildVirtualIndexContent([], []); + expect(content).toContain("# Memory Index"); + expect(content).toContain("0 entries (0 summaries, 0 sessions):"); + expect(content).not.toContain("## Summaries"); + expect(content).not.toContain("## Sessions"); + }); + }); + + describe("readVirtualPathContents: /index.md fallback queries both tables", () => { + it("queries both memory and sessions tables in parallel when /index.md has no physical row", async () => { + const api = { + query: vi.fn() + // 1. Union query for the exact-path read (no /index.md row present) + .mockResolvedValueOnce([]) + // 2. Parallel fallback: summaries from memory (empty — baseline_cloud case) + .mockResolvedValueOnce([]) + // 3. Parallel fallback: sessions table (272 rows) + .mockResolvedValueOnce([ + { path: "/sessions/conv_0_session_1.json", description: "conv 0 sess 1" }, + { path: "/sessions/conv_0_session_2.json", description: "conv 0 sess 2" }, + ]), + } as any; + + const result = await readVirtualPathContents(api, "memory", "sessions", ["/index.md"]); + const indexContent = result.get("/index.md") ?? ""; + + expect(api.query).toHaveBeenCalledTimes(3); + + const fallbackSqls = [ + String(api.query.mock.calls[1]?.[0] ?? ""), + String(api.query.mock.calls[2]?.[0] ?? ""), + ]; + const summarySql = fallbackSqls.find(sql => sql.includes("/summaries/%")) ?? 
""; + const sessionsSql = fallbackSqls.find(sql => sql.includes("/sessions/%")) ?? ""; + + expect(summarySql).toContain('FROM "memory"'); + expect(summarySql).toContain("path LIKE '/summaries/%'"); + expect(sessionsSql).toContain('FROM "sessions"'); + expect(sessionsSql).toContain("path LIKE '/sessions/%'"); + + expect(indexContent).toContain("2 entries (0 summaries, 2 sessions):"); + expect(indexContent).toContain("/sessions/conv_0_session_1.json"); + expect(indexContent).toContain("/sessions/conv_0_session_2.json"); + }); + + it("still produces an index when the sessions-table fallback query fails", async () => { + const api = { + query: vi.fn() + .mockResolvedValueOnce([]) // union query for exact paths + .mockResolvedValueOnce([ + { + path: "/summaries/alice/s1.md", + project: "repo", + description: "summary", + creation_date: "2026-01-01T00:00:00.000Z", + }, + ]) + .mockRejectedValueOnce(new Error("sessions table down")), + } as any; + + const result = await readVirtualPathContents(api, "memory", "sessions", ["/index.md"]); + const indexContent = result.get("/index.md") ?? ""; + + expect(indexContent).toContain("1 entries (1 summaries, 0 sessions):"); + expect(indexContent).toContain("/summaries/alice/s1.md"); + }); + }); }); From 3af02a0f0f60cb0c07b3421df7e09193bacb8776 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 20:45:49 +0000 Subject: [PATCH 21/39] test(baseline_cloud 3-QA): end-to-end regression tests anchored in real QAs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds integration coverage for the three LoCoMo QAs that cloud baseline got wrong before the /index.md fix landed (conv_0 questions 6, 25, 46): - qa_6 : "When is Melanie planning on going camping?" (gold: June 2023) - qa_25 : "When did Caroline go to the LGBTQ conference?" (10 July 2023) - qa_46 : "Would Melanie be considered an ally..." 
(Yes, she is supportive) Each QA is driven through `processPreToolUse` twice — once via the Read-tool intercept (`Read /home/.deeplake/memory/index.md`) and once via the Bash intercept (`cat /home/.deeplake/memory/index.md`) — against a DeeplakeApi mock that mirrors the real sessions-only baseline workspace at the time of the regression (memory table empty, 272 rows across conv_0..9 in the sessions table). The assertions verify the synthesized index reports "272 entries (0 summaries, 272 sessions):", contains the specific session file each QA needed (conv_0_session_2 for the camping date, conv_0_session_7 for the conference, conv_0_session_10 for the ally question), and does not regress to "0 sessions:" or "1 sessions:" headers. The suite also exercises the pure builder and the `readVirtualPathContents` fallback against the same 272-row fixture so the regression is caught at the unit, integration, and entry-point boundaries. Tests run hermetically by stubbing the disk-backed session cache so they do not read or write ~/.deeplake/query-cache/. Verified by temporarily reverting the fix on virtual-table-query.ts: all eight assertions fail without the fix (0 sessions: header, missing session paths), then pass cleanly once the fix is restored. --- .../pre-tool-use-baseline-cloud-3qa.test.ts | 188 ++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts diff --git a/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts b/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts new file mode 100644 index 0000000..47da1e1 --- /dev/null +++ b/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts @@ -0,0 +1,188 @@ +/** + * Integration coverage for the three real LoCoMo QAs that the + * `locomo_benchmark/baseline` cloud baseline run got wrong before fix + * #1 landed. 
Each case exercises the Read/Bash entry points of + * `processPreToolUse` against a workspace snapshot that mirrors the + * real baseline workspace at the time of the regression: + * + * - `memory` table: empty (summaries have been dropped) + * - `sessions` table: 272 rows, one per LoCoMo session file + * + * The fix (commit 4271baf) taught `buildVirtualIndexContent` and the + * /index.md fallback in `readVirtualPathContents` to merge session rows + * alongside summary rows. Without that fix the synthesized index + * reported "0 sessions:" in this workspace and agents concluded memory + * was empty. These tests fail loudly if the regression returns. + */ + +import { describe, expect, it, vi } from "vitest"; +import { processPreToolUse } from "../../src/hooks/pre-tool-use.js"; +import { + buildVirtualIndexContent, + readVirtualPathContents, +} from "../../src/hooks/virtual-table-query.js"; + +// ── Fixture: 272 session rows matching the real `locomo_benchmark/baseline` +// workspace shape — `/sessions/conv_<conv>_session_<n>.json` — spanning +// conv 0..9 with session counts matching the LoCoMo dataset. +const SESSION_COUNTS_PER_CONV: Record<number, number> = { + 0: 35, 1: 34, 2: 28, 3: 25, 4: 26, 5: 27, 6: 23, 7: 27, 8: 26, 9: 21, +}; + +function makeSessionRows(): Array<{ path: string; description: string }> { + const rows: Array<{ path: string; description: string }> = []; + for (const [conv, count] of Object.entries(SESSION_COUNTS_PER_CONV)) { + for (let s = 1; s <= count; s++) { + rows.push({ + path: `/sessions/conv_${conv}_session_${s}.json`, + description: `LoCoMo conv ${conv} session ${s}`, + }); + } + } + return rows; +} + +const SESSION_ROWS = makeSessionRows(); + +// Sanity-check the fixture shape so a bad edit fails here, not deep in a test. 
+if (SESSION_ROWS.length !== 272) { + throw new Error(`fixture should model 272 rows, got ${SESSION_ROWS.length}`); +} + +// ── Real QAs from `results/baseline_cloud/scored_baseline_cloud.jsonl` +// that baseline-local got right and baseline-cloud got wrong before the +// fix. Each row is verbatim from the scored JSONL except `session_file` +// which records the session we'd expect Claude to land on. +const REAL_QAS = [ + { + name: "qa_6: Melanie's camping plans", + question: "When is Melanie planning on going camping?", + gold_answer: "June 2023", + expected_session_file: "/sessions/conv_0_session_2.json", + }, + { + name: "qa_25: Caroline's LGBTQ conference", + question: "When did Caroline go to the LGBTQ conference?", + gold_answer: "10 July 2023", + expected_session_file: "/sessions/conv_0_session_7.json", + }, + { + name: "qa_46: Melanie as an ally", + question: "Would Melanie be considered an ally to the transgender community?", + gold_answer: "Yes, she is supportive", + expected_session_file: "/sessions/conv_0_session_10.json", + }, +] as const; + +const BASE_CONFIG = { + token: "test-token", + apiUrl: "https://api.test", + orgId: "locomo_benchmark", + workspaceId: "baseline", +}; + +/** Simulates the real baseline workspace: memory empty, sessions populated. */ +function makeBaselineWorkspaceApi(sessionRows = SESSION_ROWS) { + return { + query: vi.fn(async (sql: string) => { + // Memory-table queries return 0 rows (memory table dropped). + if (/FROM\s+"memory"/i.test(sql)) return []; + // Sessions-table fallback query for the virtual /index.md: + if (/FROM\s+"sessions".*\/sessions\/%/i.test(sql)) return sessionRows; + // Union query for exact-path reads of /index.md resolves to nothing — + // forces the fallback branch that builds the synthetic index. 
+ if (/UNION ALL/i.test(sql)) return []; + return []; + }), + } as any; +} + +describe("baseline_cloud 3-QA regression: sessions-only workspace", () => { + it("pure builder renders a real 272-row index without the old '0 sessions:' bug", () => { + const content = buildVirtualIndexContent([], SESSION_ROWS); + + expect(content).toContain("272 entries (0 summaries, 272 sessions):"); + expect(content).toContain("## Sessions"); + expect(content).not.toContain("## Summaries"); + // Bug guard: the old output had a lone "${n} sessions:" header with + // n taken from summary rows only. In this workspace that would be 0. + expect(content).not.toMatch(/^0 sessions:$/m); + expect(content).not.toContain("\n0 sessions:\n"); + + // Every real session path from the fixture must appear in the index. + for (const row of SESSION_ROWS) { + expect(content).toContain(row.path); + } + }); + + it("readVirtualPathContents fallback pulls sessions into /index.md for the baseline workspace", async () => { + const api = makeBaselineWorkspaceApi(); + const result = await readVirtualPathContents(api, "memory", "sessions", ["/index.md"]); + const indexContent = result.get("/index.md") ?? ""; + + expect(indexContent).toContain("272 entries (0 summaries, 272 sessions):"); + // Must land on the three sessions that carry answers for our 3 real QAs. 
+ for (const qa of REAL_QAS) { + expect(indexContent).toContain(qa.expected_session_file); + } + }); + + for (const qa of REAL_QAS) { + describe(qa.name, () => { + it("Read /home/.deeplake/memory/index.md intercept returns the real session listing (not '1 sessions:')", async () => { + const api = makeBaselineWorkspaceApi(); + + const decision = await processPreToolUse( + { + session_id: `s-${qa.expected_session_file}`, + tool_name: "Read", + tool_input: { file_path: "~/.deeplake/memory/index.md" }, + tool_use_id: "tu-read-index", + }, + { + config: BASE_CONFIG, + createApi: vi.fn(() => api), + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + readCachedIndexContentFn: () => null, + writeCachedIndexContentFn: () => undefined, + }, + ); + + expect(decision).not.toBeNull(); + const body = decision?.command ?? ""; + expect(body).toContain("# Memory Index"); + expect(body).toContain("272 entries (0 summaries, 272 sessions):"); + expect(body).toContain(qa.expected_session_file); + // Regression guard: the old (buggy) synthesized index printed + // " sessions:" where n was the count of summary rows only. + expect(body).not.toMatch(/\b0 sessions:/); + expect(body).not.toMatch(/\b1 sessions:/); + }); + + it("Bash cat index.md intercept returns the same real session listing", async () => { + const api = makeBaselineWorkspaceApi(); + + const decision = await processPreToolUse( + { + session_id: `s-bash-${qa.expected_session_file}`, + tool_name: "Bash", + tool_input: { command: "cat ~/.deeplake/memory/index.md" }, + tool_use_id: "tu-cat-index", + }, + { + config: BASE_CONFIG, + createApi: vi.fn(() => api), + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + readCachedIndexContentFn: () => null, + writeCachedIndexContentFn: () => undefined, + }, + ); + + expect(decision).not.toBeNull(); + const body = decision?.command ?? 
""; + expect(body).toContain("272 entries (0 summaries, 272 sessions):"); + expect(body).toContain(qa.expected_session_file); + }); + }); + } +}); From 4c5d50bbfbe0bcfb706b74f0e6f1b9a6516f057b Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 21:02:32 +0000 Subject: [PATCH 22/39] fix(pre-tool-use): return file_path for Read-tool intercepts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Claude Code hooks replace the tool input with whatever `updatedInput` they emit. The pre-tool-use hook was always emitting `{command, description}` — the Bash-tool shape — even when the incoming tool was Read. The Read implementation then read `updatedInput.file_path`, found `undefined`, and crashed with: "The 'path' property must be of type string, got undefined" Claude wasted a turn (or more) recovering by re-issuing the read as a Bash `cat`. In the plugin-v8-optimizations-100 run (memory table populated, 272 summaries), 60 / 100 transcripts contained this error. In the sessions-only baseline_cloud run it was even worse because the recovery path hit fix #1's `/index.md` bug on top. The fix teaches the hook to materialize Read intercepts into a real file on disk and return the path: - Add an optional `file_path` field to ClaudePreToolDecision. When present, main() emits `updatedInput: {file_path}` instead of the Bash-shaped `{command, description}`. - Add `writeReadCacheFile(sessionId, virtualPath, content)` which writes into `~/.deeplake/query-cache//read/`, mirroring the per-session cache the index already uses. Cleanup reuses the existing session-end path. - Add `buildReadDecision(file_path, description)` so the call site is explicit about the Read-tool shape. - Branch in the direct-read code path: when `input.tool_name === "Read"`, write the fetched content via `writeReadCacheFile` and return `buildReadDecision(...)`. Bash cat / head / tail / wc keep their existing `echo ` shape. 
- Thread `writeReadCacheFileFn` through the existing deps so tests can stub it and stay hermetic. Test updates: - `hooks-source.test.ts > reuses cached /index.md content ...` now asserts `directDecision?.file_path` instead of `.command` for the Read variant, with a stubbed cache writer that captures the written content. - `hooks-source.test.ts > uses direct grep, direct reads, listings ...` updated the Read assertion the same way. - `pre-tool-use-baseline-cloud-3qa.test.ts` Read cases now assert that the decision carries `file_path` (bug #2 guard) while the Bash cases confirm `command` still exists (bash shape preserved). Verified: stashing the fix causes all three Read-tool per-QA tests to fail; restoring the fix makes them pass. End-to-end verified against locomo_benchmark/baseline (272 sessions, memory dropped) on a 5-QA subset spanning conv 0 questions 6 / 25 / 29 / 46 / 62 — five QAs that baseline-local answered correctly and the original baseline_cloud run got wrong. Post-fix run: 5 / 5 correct, 0 occurrences of "property must be of type string" across the five transcripts. (Haiku happened to pick Bash over Read for each QA in this run, so the Read intercept didn't fire in-flight; the unit tests and the earlier fix1b transcript where Read was attempted cover that path.) 
--- claude-code/bundle/pre-tool-use.js | 29 +++++++++-- claude-code/tests/hooks-source.test.ts | 28 ++++++++-- .../pre-tool-use-baseline-cloud-3qa.test.ts | 33 ++++++++++-- src/hooks/pre-tool-use.ts | 51 ++++++++++++++++++- 4 files changed, 127 insertions(+), 14 deletions(-) diff --git a/claude-code/bundle/pre-tool-use.js b/claude-code/bundle/pre-tool-use.js index 5652e1c..3102cf7 100755 --- a/claude-code/bundle/pre-tool-use.js +++ b/claude-code/bundle/pre-tool-use.js @@ -1,7 +1,8 @@ #!/usr/bin/env node // dist/src/hooks/pre-tool-use.js -import { existsSync as existsSync3 } from "node:fs"; +import { existsSync as existsSync3, mkdirSync as mkdirSync3, writeFileSync as writeFileSync3 } from "node:fs"; +import { homedir as homedir5 } from "node:os"; import { join as join6, dirname } from "node:path"; import { fileURLToPath as fileURLToPath2 } from "node:url"; @@ -1806,6 +1807,19 @@ function rewritePaths(cmd) { var log4 = (msg) => log("pre", msg); var __bundleDir = dirname(fileURLToPath2(import.meta.url)); var SHELL_BUNDLE = existsSync3(join6(__bundleDir, "shell", "deeplake-shell.js")) ? join6(__bundleDir, "shell", "deeplake-shell.js") : join6(__bundleDir, "..", "shell", "deeplake-shell.js"); +var READ_CACHE_ROOT = join6(homedir5(), ".deeplake", "query-cache"); +function writeReadCacheFile(sessionId, virtualPath, content, deps = {}) { + const { cacheRoot = READ_CACHE_ROOT } = deps; + const safeSessionId = sessionId.replace(/[^a-zA-Z0-9._-]/g, "_") || "unknown"; + const rel = virtualPath.replace(/^\/+/, "") || "content"; + const absPath = join6(cacheRoot, safeSessionId, "read", rel); + mkdirSync3(dirname(absPath), { recursive: true }); + writeFileSync3(absPath, content, "utf-8"); + return absPath; +} +function buildReadDecision(file_path, description) { + return { command: "", description, file_path }; +} function getReadTargetPath(toolInput) { const rawPath = toolInput.file_path ?? toolInput.path; return rawPath ? 
rawPath : null; @@ -1886,7 +1900,7 @@ function buildFallbackDecision(shellCmd, shellBundle = SHELL_BUNDLE) { return buildAllowDecision(`node "${shellBundle}" -c "${shellCmd.replace(/"/g, '\\"')}"`, `[DeepLake shell] ${shellCmd}`); } async function processPreToolUse(input, deps = {}) { - const { config = loadConfig(), createApi = (table2, activeConfig) => new DeeplakeApi(activeConfig.token, activeConfig.apiUrl, activeConfig.orgId, activeConfig.workspaceId, table2), executeCompiledBashCommandFn = executeCompiledBashCommand, handleGrepDirectFn = handleGrepDirect, readVirtualPathContentsFn = readVirtualPathContents, readVirtualPathContentFn = readVirtualPathContent, listVirtualPathRowsFn = listVirtualPathRows, findVirtualPathsFn = findVirtualPaths, readCachedIndexContentFn = readCachedIndexContent, writeCachedIndexContentFn = writeCachedIndexContent, shellBundle = SHELL_BUNDLE, logFn = log4 } = deps; + const { config = loadConfig(), createApi = (table2, activeConfig) => new DeeplakeApi(activeConfig.token, activeConfig.apiUrl, activeConfig.orgId, activeConfig.workspaceId, table2), executeCompiledBashCommandFn = executeCompiledBashCommand, handleGrepDirectFn = handleGrepDirect, readVirtualPathContentsFn = readVirtualPathContents, readVirtualPathContentFn = readVirtualPathContent, listVirtualPathRowsFn = listVirtualPathRows, findVirtualPathsFn = findVirtualPaths, readCachedIndexContentFn = readCachedIndexContent, writeCachedIndexContentFn = writeCachedIndexContent, writeReadCacheFileFn = writeReadCacheFile, shellBundle = SHELL_BUNDLE, logFn = log4 } = deps; const cmd = input.tool_input.command ?? ""; const shellCmd = getShellCommand(input.tool_name, input.tool_input); const toolPath = getReadTargetPath(input.tool_input) ?? input.tool_input.path ?? ""; @@ -2022,6 +2036,10 @@ async function processPreToolUse(input, deps = {}) { content = fromEnd ? lines.slice(-lineLimit).join("\n") : lines.slice(0, lineLimit).join("\n"); } const label = lineLimit > 0 ? fromEnd ? 
`tail -${lineLimit}` : `head -${lineLimit}` : "cat"; + if (input.tool_name === "Read") { + const file_path = writeReadCacheFileFn(input.session_id, virtualPath, content); + return buildReadDecision(file_path, `[DeepLake direct] ${label} ${virtualPath}`); + } return buildAllowDecision(`echo ${JSON.stringify(content)}`, `[DeepLake direct] ${label} ${virtualPath}`); } } @@ -2092,11 +2110,12 @@ async function main() { const decision = await processPreToolUse(input); if (!decision) return; + const updatedInput = decision.file_path !== void 0 ? { file_path: decision.file_path } : { command: decision.command, description: decision.description }; console.log(JSON.stringify({ hookSpecificOutput: { hookEventName: "PreToolUse", permissionDecision: "allow", - updatedInput: decision + updatedInput } })); } @@ -2108,10 +2127,12 @@ if (isDirectRun(import.meta.url)) { } export { buildAllowDecision, + buildReadDecision, extractGrepParams, getShellCommand, isSafe, processPreToolUse, rewritePaths, - touchesMemory + touchesMemory, + writeReadCacheFile }; diff --git a/claude-code/tests/hooks-source.test.ts b/claude-code/tests/hooks-source.test.ts index 10c4595..4dceb1a 100644 --- a/claude-code/tests/hooks-source.test.ts +++ b/claude-code/tests/hooks-source.test.ts @@ -329,6 +329,7 @@ describe("claude pre-tool source", () => { }, ]), }; + const capturedReadFiles: Array<{ path: string; content: string }> = []; const readDecision = await processPreToolUse({ session_id: "s1", tool_name: "Read", @@ -339,8 +340,17 @@ describe("claude pre-tool source", () => { createApi: vi.fn(() => api as any), readVirtualPathContentFn: vi.fn(async () => null) as any, executeCompiledBashCommandFn: vi.fn(async () => null) as any, + writeReadCacheFileFn: ((sessionId: string, virtualPath: string, content: string) => { + const tmp = `/tmp/hooks-source.test-${sessionId}${virtualPath}`; + capturedReadFiles.push({ path: tmp, content }); + return tmp; + }) as any, }); - expect(readDecision?.command).toContain("# 
Memory Index"); + // Read-tool intercepts return {file_path} (Claude Code's Read expects that); + // the index content is written to disk at that path, not inlined in command. + expect(readDecision?.file_path).toBe("/tmp/hooks-source.test-s1/index.md"); + expect(capturedReadFiles).toHaveLength(1); + expect(capturedReadFiles[0]?.content).toContain("# Memory Index"); const readDirDecision = await processPreToolUse({ session_id: "s1", @@ -403,6 +413,12 @@ describe("claude pre-tool source", () => { const readCachedIndexContentFn = vi.fn(() => "cached index"); const writeCachedIndexContentFn = vi.fn(); + const capturedReadFiles: Array<{ sessionId: string; virtualPath: string; content: string }> = []; + const writeReadCacheFileFn = vi.fn((sessionId: string, virtualPath: string, content: string) => { + capturedReadFiles.push({ sessionId, virtualPath, content }); + return `/tmp/read-cache-${sessionId}${virtualPath}`; + }); + const directDecision = await processPreToolUse({ session_id: "s1", tool_name: "Read", @@ -414,8 +430,14 @@ describe("claude pre-tool source", () => { writeCachedIndexContentFn: writeCachedIndexContentFn as any, readVirtualPathContentFn: readVirtualPathContentFn as any, executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(directDecision?.command).toContain("cached index"); + writeReadCacheFileFn: writeReadCacheFileFn as any, + }); + // Read-tool intercepts emit {file_path}; content is materialized to disk + // via writeReadCacheFileFn, not inlined in command. 
+ expect(directDecision?.file_path).toBe("/tmp/read-cache-s1/index.md"); + expect(capturedReadFiles).toEqual([ + { sessionId: "s1", virtualPath: "/index.md", content: "cached index" }, + ]); expect(readVirtualPathContentFn).not.toHaveBeenCalled(); expect(writeCachedIndexContentFn).toHaveBeenCalledWith("s1", "cached index"); diff --git a/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts b/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts index 47da1e1..acce536 100644 --- a/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts +++ b/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts @@ -129,8 +129,9 @@ describe("baseline_cloud 3-QA regression: sessions-only workspace", () => { for (const qa of REAL_QAS) { describe(qa.name, () => { - it("Read /home/.deeplake/memory/index.md intercept returns the real session listing (not '1 sessions:')", async () => { + it("Read /home/.deeplake/memory/index.md intercept returns file_path (Read-tool shape) pointing to the real session listing", async () => { const api = makeBaselineWorkspaceApi(); + const capturedReadFiles: Array<{ sessionId: string; virtualPath: string; content: string; returnedPath: string }> = []; const decision = await processPreToolUse( { @@ -145,21 +146,39 @@ describe("baseline_cloud 3-QA regression: sessions-only workspace", () => { executeCompiledBashCommandFn: vi.fn(async () => null) as any, readCachedIndexContentFn: () => null, writeCachedIndexContentFn: () => undefined, + writeReadCacheFileFn: ((sessionId: string, virtualPath: string, content: string) => { + const returnedPath = `/tmp/baseline-cloud-3qa-test-${sessionId.replace(/[^a-zA-Z0-9._-]/g, "_")}${virtualPath}`; + capturedReadFiles.push({ sessionId, virtualPath, content, returnedPath }); + return returnedPath; + }) as any, }, ); + // Regression guard for bug #2: Read intercept MUST return a decision + // that causes main() to emit `updatedInput: {file_path}`. Today that + // means the decision carries `file_path`. 
If this asserts "undefined", + // Claude Code's Read tool will error with "path must be of type string". expect(decision).not.toBeNull(); - const body = decision?.command ?? ""; + expect(decision?.file_path).toBeDefined(); + expect(typeof decision?.file_path).toBe("string"); + + // Content must be materialized once, with the real index shape. + expect(capturedReadFiles).toHaveLength(1); + const materialized = capturedReadFiles[0]; + expect(materialized?.virtualPath).toBe("/index.md"); + expect(decision?.file_path).toBe(materialized?.returnedPath); + + const body = materialized?.content ?? ""; expect(body).toContain("# Memory Index"); expect(body).toContain("272 entries (0 summaries, 272 sessions):"); expect(body).toContain(qa.expected_session_file); - // Regression guard: the old (buggy) synthesized index printed - // " sessions:" where n was the count of summary rows only. + // Fix #1 regression guard (still important after fix #2): the old + // synthesized index reported sessions from the memory table only. expect(body).not.toMatch(/\b0 sessions:/); expect(body).not.toMatch(/\b1 sessions:/); }); - it("Bash cat index.md intercept returns the same real session listing", async () => { + it("Bash cat index.md intercept returns the same listing via {command} (bash shape preserved)", async () => { const api = makeBaselineWorkspaceApi(); const decision = await processPreToolUse( @@ -179,6 +198,10 @@ describe("baseline_cloud 3-QA regression: sessions-only workspace", () => { ); expect(decision).not.toBeNull(); + // Bash intercepts keep the historical {command, description} shape — + // Claude Code's Bash tool reads `command`. The content is inlined as + // an `echo "..."` payload so the virtual shell isn't needed here. + expect(decision?.file_path).toBeUndefined(); const body = decision?.command ?? 
""; expect(body).toContain("272 entries (0 summaries, 272 sessions):"); expect(body).toContain(qa.expected_session_file); diff --git a/src/hooks/pre-tool-use.ts b/src/hooks/pre-tool-use.ts index 2dc6498..1a3b43d 100644 --- a/src/hooks/pre-tool-use.ts +++ b/src/hooks/pre-tool-use.ts @@ -1,6 +1,7 @@ #!/usr/bin/env node -import { existsSync } from "node:fs"; +import { existsSync, mkdirSync, writeFileSync } from "node:fs"; +import { homedir } from "node:os"; import { join, dirname } from "node:path"; import { fileURLToPath } from "node:url"; import { readStdin } from "../utils/stdin.js"; @@ -42,6 +43,43 @@ export interface PreToolUseInput { export interface ClaudePreToolDecision { command: string; description: string; + /** + * When set, main() emits the hook response as `updatedInput: {file_path}` + * instead of `updatedInput: {command, description}`. This is required for + * Read-tool intercepts: Claude Code's Read implementation reads + * `updatedInput.file_path` and errors with "path must be of type string, + * got undefined" if the hook hands it the Bash-shaped input. + */ + file_path?: string; +} + +const READ_CACHE_ROOT = join(homedir(), ".deeplake", "query-cache"); + +/** + * Materialize fetched content for a Read intercept into a real file on disk + * so Claude Code's Read tool can read it via `updatedInput.file_path`. The + * file lives under `~/.deeplake/query-cache//read/` and mirrors + * the virtual path structure (e.g. `/sessions/conv_0_session_1.json` → + * `.../read/sessions/conv_0_session_1.json`). Per-session dirs are cleaned + * alongside the index cache at session end. 
+ */ +export function writeReadCacheFile( + sessionId: string, + virtualPath: string, + content: string, + deps: { cacheRoot?: string } = {}, +): string { + const { cacheRoot = READ_CACHE_ROOT } = deps; + const safeSessionId = sessionId.replace(/[^a-zA-Z0-9._-]/g, "_") || "unknown"; + const rel = virtualPath.replace(/^\/+/, "") || "content"; + const absPath = join(cacheRoot, safeSessionId, "read", rel); + mkdirSync(dirname(absPath), { recursive: true }); + writeFileSync(absPath, content, "utf-8"); + return absPath; +} + +export function buildReadDecision(file_path: string, description: string): ClaudePreToolDecision { + return { command: "", description, file_path }; } function getReadTargetPath(toolInput: Record<string, unknown>): string | null { @@ -141,6 +179,7 @@ interface ClaudePreToolDeps { findVirtualPathsFn?: typeof findVirtualPaths; readCachedIndexContentFn?: typeof readCachedIndexContent; writeCachedIndexContentFn?: typeof writeCachedIndexContent; + writeReadCacheFileFn?: typeof writeReadCacheFile; shellBundle?: string; logFn?: (msg: string) => void; } @@ -163,6 +202,7 @@ export async function processPreToolUse(input: PreToolUseInput, deps: ClaudePreT findVirtualPathsFn = findVirtualPaths, readCachedIndexContentFn = readCachedIndexContent, writeCachedIndexContentFn = writeCachedIndexContent, + writeReadCacheFileFn = writeReadCacheFile, shellBundle = SHELL_BUNDLE, logFn = log, } = deps; @@ -314,6 +354,10 @@ export async function processPreToolUse(input: PreToolUseInput, deps: ClaudePreT content = fromEnd ? lines.slice(-lineLimit).join("\n") : lines.slice(0, lineLimit).join("\n"); } const label = lineLimit > 0 ? (fromEnd ?
`tail -${lineLimit}` : `head -${lineLimit}`) : "cat"; + if (input.tool_name === "Read") { + const file_path = writeReadCacheFileFn(input.session_id, virtualPath, content); + return buildReadDecision(file_path, `[DeepLake direct] ${label} ${virtualPath}`); + } return buildAllowDecision(`echo ${JSON.stringify(content)}`, `[DeepLake direct] ${label} ${virtualPath}`); } } @@ -385,11 +429,14 @@ async function main(): Promise<void> { const input = await readStdin(); const decision = await processPreToolUse(input); if (!decision) return; + const updatedInput: Record<string, unknown> = decision.file_path !== undefined + ? { file_path: decision.file_path } + : { command: decision.command, description: decision.description }; console.log(JSON.stringify({ hookSpecificOutput: { hookEventName: "PreToolUse", permissionDecision: "allow", - updatedInput: decision, + updatedInput, }, })); } From ea892a55f27c9b3daebbf9ab0b9c2bf7b2c6fc71 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 21:05:49 +0000 Subject: [PATCH 23/39] observability: log silent releaseLock / cleanup failures behind HIVEMIND_DEBUG MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The race fix and worker lifecycle have several silent try/catch blocks around releaseLock and tmpdir cleanup. Silent failures there mean a lock-leak or a leftover /tmp/deeplake-wiki-* directory would be impossible to diagnose from the user side. Converts every such catch to a debug-gated log instead of a silent swallow. The `_log` helper in src/utils/debug.ts only writes to ~/.deeplake/hook-debug.log when HIVEMIND_DEBUG=1, so this adds zero noise in normal runs but full traceability when the user opts in.
Covered paths: - session-end.ts / codex/stop.ts: the spawn-wrapping catch that releases the lock on spawn failure (flagged by the PR review) - capture.ts / codex/capture.ts: the same pattern in the periodic trigger helper - wiki-worker.ts / codex/wiki-worker.ts: the finally-block releaseLock AND the tmpdir cleanup — uses a new dlog() helper so we do not pollute deeplake-wiki.log (which is unconditional and user-visible) - summary-state.ts: the RMW lock cleanup paths in withRmwLock (both the stale-reclaim unlink and the finally unlink) and tryAcquireLock / releaseLock (ENOENT is filtered — that is the normal "lock wasn't held" case, everything else is worth seeing) Manually verified: with HIVEMIND_DEBUG unset, a forced EACCES on the lock unlink produces no log. With HIVEMIND_DEBUG=1, the same failure lands in hook-debug.log. --- claude-code/bundle/capture.js | 23 ++++++++++---- claude-code/bundle/session-end.js | 16 +++++++--- claude-code/bundle/wiki-worker.js | 26 ++++++++++++--- codex/bundle/capture.js | 23 ++++++++++---- codex/bundle/stop.js | 16 +++++++--- codex/bundle/wiki-worker.js | 53 +++++++++++++++++++++++-------- src/hooks/capture.ts | 7 +++- src/hooks/codex/capture.ts | 7 +++- src/hooks/codex/stop.ts | 7 +++- src/hooks/codex/wiki-worker.ts | 15 +++++++-- src/hooks/session-end.ts | 7 +++- src/hooks/summary-state.ts | 37 ++++++++++++++++++--- src/hooks/wiki-worker.ts | 18 +++++++++-- 13 files changed, 202 insertions(+), 53 deletions(-) diff --git a/claude-code/bundle/capture.js b/claude-code/bundle/capture.js index e25d08b..c207476 100755 --- a/claude-code/bundle/capture.js +++ b/claude-code/bundle/capture.js @@ -306,6 +306,7 @@ var DeeplakeApi = class { import { readFileSync as readFileSync2, writeFileSync, writeSync, mkdirSync, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir3 } from "node:os"; import { join as join3 } from "node:path"; +var dlog = (msg) => log("summary-state", msg); var 
STATE_DIR = join3(homedir3(), ".claude", "hooks", "summary-state"); var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); function statePath(sessionId) { @@ -343,9 +344,11 @@ function withRmwLock(sessionId, fn) { if (e.code !== "EEXIST") throw e; if (Date.now() > deadline) { + dlog(`rmw lock deadline exceeded for ${sessionId}, reclaiming stale lock`); try { unlinkSync(rmwLock); - } catch { + } catch (unlinkErr) { + dlog(`stale rmw lock unlink failed for ${sessionId}: ${unlinkErr.message}`); } continue; } @@ -358,7 +361,8 @@ function withRmwLock(sessionId, fn) { closeSync(fd); try { unlinkSync(rmwLock); - } catch { + } catch (unlinkErr) { + dlog(`rmw lock cleanup failed for ${sessionId}: ${unlinkErr.message}`); } } } @@ -398,11 +402,13 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { const ageMs = Date.now() - parseInt(readFileSync2(p, "utf-8"), 10); if (Number.isFinite(ageMs) && ageMs < maxAgeMs) return false; - } catch { + } catch (readErr) { + dlog(`lock file unreadable for ${sessionId}, treating as stale: ${readErr.message}`); } try { unlinkSync(p); - } catch { + } catch (unlinkErr) { + dlog(`could not unlink stale lock for ${sessionId}: ${unlinkErr.message}`); return false; } } @@ -423,7 +429,10 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { function releaseLock(sessionId) { try { unlinkSync(lockPath(sessionId)); - } catch { + } catch (e) { + if (e?.code !== "ENOENT") { + dlog(`releaseLock unlink failed for ${sessionId}: ${e.message}`); + } } } @@ -643,9 +652,11 @@ function maybeTriggerPeriodicSummary(sessionId, cwd, config) { reason: "Periodic" }); } catch (e) { + log3(`periodic spawn failed: ${e.message}`); try { releaseLock(sessionId); - } catch { + } catch (releaseErr) { + log3(`releaseLock after periodic spawn failure also failed: ${releaseErr.message}`); } throw e; } diff --git a/claude-code/bundle/session-end.js b/claude-code/bundle/session-end.js index c0f4c66..77caa34 100755 --- 
a/claude-code/bundle/session-end.js +++ b/claude-code/bundle/session-end.js @@ -182,6 +182,7 @@ function bundleDirFromImportMeta(importMetaUrl) { import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, writeSync, mkdirSync as mkdirSync2, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir4 } from "node:os"; import { join as join4 } from "node:path"; +var dlog = (msg) => log("summary-state", msg); var STATE_DIR = join4(homedir4(), ".claude", "hooks", "summary-state"); var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); function lockPath(sessionId) { @@ -195,11 +196,13 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { const ageMs = Date.now() - parseInt(readFileSync2(p, "utf-8"), 10); if (Number.isFinite(ageMs) && ageMs < maxAgeMs) return false; - } catch { + } catch (readErr) { + dlog(`lock file unreadable for ${sessionId}, treating as stale: ${readErr.message}`); } try { unlinkSync(p); - } catch { + } catch (unlinkErr) { + dlog(`could not unlink stale lock for ${sessionId}: ${unlinkErr.message}`); return false; } } @@ -220,7 +223,10 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { function releaseLock(sessionId) { try { unlinkSync(lockPath(sessionId)); - } catch { + } catch (e) { + if (e?.code !== "ENOENT") { + dlog(`releaseLock unlink failed for ${sessionId}: ${e.message}`); + } } } @@ -255,9 +261,11 @@ async function main() { reason: "SessionEnd" }); } catch (e) { + log2(`spawn failed: ${e.message}`); try { releaseLock(sessionId); - } catch { + } catch (releaseErr) { + log2(`releaseLock after spawn failure also failed: ${releaseErr.message}`); } throw e; } diff --git a/claude-code/bundle/wiki-worker.js b/claude-code/bundle/wiki-worker.js index cd53b4e..02468a3 100755 --- a/claude-code/bundle/wiki-worker.js +++ b/claude-code/bundle/wiki-worker.js @@ -14,11 +14,18 @@ var LOG = join(homedir(), ".deeplake", "hook-debug.log"); function 
utcTimestamp(d = /* @__PURE__ */ new Date()) { return d.toISOString().replace("T", " ").slice(0, 19) + " UTC"; } +function log(tag, msg) { + if (!DEBUG) + return; + appendFileSync(LOG, `${(/* @__PURE__ */ new Date()).toISOString()} [${tag}] ${msg} +`); +} // dist/src/hooks/summary-state.js import { readFileSync, writeFileSync, writeSync, mkdirSync, renameSync, existsSync, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir2 } from "node:os"; import { join as join2 } from "node:path"; +var dlog = (msg) => log("summary-state", msg); var STATE_DIR = join2(homedir2(), ".claude", "hooks", "summary-state"); var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); function statePath(sessionId) { @@ -56,9 +63,11 @@ function withRmwLock(sessionId, fn) { if (e.code !== "EEXIST") throw e; if (Date.now() > deadline) { + dlog(`rmw lock deadline exceeded for ${sessionId}, reclaiming stale lock`); try { unlinkSync(rmwLock); - } catch { + } catch (unlinkErr) { + dlog(`stale rmw lock unlink failed for ${sessionId}: ${unlinkErr.message}`); } continue; } @@ -71,7 +80,8 @@ function withRmwLock(sessionId, fn) { closeSync(fd); try { unlinkSync(rmwLock); - } catch { + } catch (unlinkErr) { + dlog(`rmw lock cleanup failed for ${sessionId}: ${unlinkErr.message}`); } } } @@ -88,7 +98,10 @@ function finalizeSummary(sessionId, jsonlLines) { function releaseLock(sessionId) { try { unlinkSync(lockPath(sessionId)); - } catch { + } catch (e) { + if (e?.code !== "ENOENT") { + dlog(`releaseLock unlink failed for ${sessionId}: ${e.message}`); + } } } @@ -118,6 +131,7 @@ async function uploadSummary(query2, params) { } // dist/src/hooks/wiki-worker.js +var dlog2 = (msg) => log("wiki-worker", msg); var cfg = JSON.parse(readFileSync2(process.argv[2], "utf-8")); var tmpDir = cfg.tmpDir; var tmpJsonl = join3(tmpDir, "session.jsonl"); @@ -165,7 +179,8 @@ async function query(sql, retries = 4) { function cleanup() { try { rmSync(tmpDir, { recursive: true, force: true }); - } 
catch { + } catch (cleanupErr) { + dlog2(`cleanup failed to remove ${tmpDir}: ${cleanupErr.message}`); } } async function main() { @@ -248,7 +263,8 @@ async function main() { cleanup(); try { releaseLock(cfg.sessionId); - } catch { + } catch (releaseErr) { + dlog2(`releaseLock failed in finally for ${cfg.sessionId}: ${releaseErr.message}`); } } } diff --git a/codex/bundle/capture.js b/codex/bundle/capture.js index 46b525f..719205e 100755 --- a/codex/bundle/capture.js +++ b/codex/bundle/capture.js @@ -303,6 +303,7 @@ var DeeplakeApi = class { import { readFileSync as readFileSync2, writeFileSync, writeSync, mkdirSync, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir3 } from "node:os"; import { join as join3 } from "node:path"; +var dlog = (msg) => log("summary-state", msg); var STATE_DIR = join3(homedir3(), ".claude", "hooks", "summary-state"); var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); function statePath(sessionId) { @@ -340,9 +341,11 @@ function withRmwLock(sessionId, fn) { if (e.code !== "EEXIST") throw e; if (Date.now() > deadline) { + dlog(`rmw lock deadline exceeded for ${sessionId}, reclaiming stale lock`); try { unlinkSync(rmwLock); - } catch { + } catch (unlinkErr) { + dlog(`stale rmw lock unlink failed for ${sessionId}: ${unlinkErr.message}`); } continue; } @@ -355,7 +358,8 @@ function withRmwLock(sessionId, fn) { closeSync(fd); try { unlinkSync(rmwLock); - } catch { + } catch (unlinkErr) { + dlog(`rmw lock cleanup failed for ${sessionId}: ${unlinkErr.message}`); } } } @@ -395,11 +399,13 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { const ageMs = Date.now() - parseInt(readFileSync2(p, "utf-8"), 10); if (Number.isFinite(ageMs) && ageMs < maxAgeMs) return false; - } catch { + } catch (readErr) { + dlog(`lock file unreadable for ${sessionId}, treating as stale: ${readErr.message}`); } try { unlinkSync(p); - } catch { + } catch (unlinkErr) { + dlog(`could not 
unlink stale lock for ${sessionId}: ${unlinkErr.message}`); return false; } } @@ -420,7 +426,10 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { function releaseLock(sessionId) { try { unlinkSync(lockPath(sessionId)); - } catch { + } catch (e) { + if (e?.code !== "ENOENT") { + dlog(`releaseLock unlink failed for ${sessionId}: ${e.message}`); + } } } @@ -624,9 +633,11 @@ function maybeTriggerPeriodicSummary(sessionId, cwd, config) { reason: "Periodic" }); } catch (e) { + log3(`periodic spawn failed: ${e.message}`); try { releaseLock(sessionId); - } catch { + } catch (releaseErr) { + log3(`releaseLock after periodic spawn failure also failed: ${releaseErr.message}`); } throw e; } diff --git a/codex/bundle/stop.js b/codex/bundle/stop.js index 72716c4..e5b3c8b 100755 --- a/codex/bundle/stop.js +++ b/codex/bundle/stop.js @@ -412,6 +412,7 @@ function bundleDirFromImportMeta(importMetaUrl) { import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, writeSync, mkdirSync as mkdirSync2, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir4 } from "node:os"; import { join as join4 } from "node:path"; +var dlog = (msg) => log("summary-state", msg); var STATE_DIR = join4(homedir4(), ".claude", "hooks", "summary-state"); var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); function lockPath(sessionId) { @@ -425,11 +426,13 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { const ageMs = Date.now() - parseInt(readFileSync2(p, "utf-8"), 10); if (Number.isFinite(ageMs) && ageMs < maxAgeMs) return false; - } catch { + } catch (readErr) { + dlog(`lock file unreadable for ${sessionId}, treating as stale: ${readErr.message}`); } try { unlinkSync(p); - } catch { + } catch (unlinkErr) { + dlog(`could not unlink stale lock for ${sessionId}: ${unlinkErr.message}`); return false; } } @@ -450,7 +453,10 @@ function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { function 
releaseLock(sessionId) { try { unlinkSync(lockPath(sessionId)); - } catch { + } catch (e) { + if (e?.code !== "ENOENT") { + dlog(`releaseLock unlink failed for ${sessionId}: ${e.message}`); + } } } @@ -544,9 +550,11 @@ async function main() { reason: "Stop" }); } catch (e) { + log3(`spawn failed: ${e.message}`); try { releaseLock(sessionId); - } catch { + } catch (releaseErr) { + log3(`releaseLock after spawn failure also failed: ${releaseErr.message}`); } throw e; } diff --git a/codex/bundle/wiki-worker.js b/codex/bundle/wiki-worker.js index 1b596aa..913c279 100755 --- a/codex/bundle/wiki-worker.js +++ b/codex/bundle/wiki-worker.js @@ -1,21 +1,37 @@ #!/usr/bin/env node // dist/src/hooks/codex/wiki-worker.js -import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, existsSync as existsSync2, appendFileSync, mkdirSync as mkdirSync2, rmSync } from "node:fs"; +import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, existsSync as existsSync2, appendFileSync as appendFileSync2, mkdirSync as mkdirSync2, rmSync } from "node:fs"; import { execFileSync } from "node:child_process"; -import { join as join2 } from "node:path"; +import { join as join3 } from "node:path"; // dist/src/hooks/summary-state.js import { readFileSync, writeFileSync, writeSync, mkdirSync, renameSync, existsSync, unlinkSync, openSync, closeSync } from "node:fs"; -import { homedir } from "node:os"; +import { homedir as homedir2 } from "node:os"; +import { join as join2 } from "node:path"; + +// dist/src/utils/debug.js +import { appendFileSync } from "node:fs"; import { join } from "node:path"; -var STATE_DIR = join(homedir(), ".claude", "hooks", "summary-state"); +import { homedir } from "node:os"; +var DEBUG = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; +var LOG = join(homedir(), ".deeplake", "hook-debug.log"); +function log(tag, msg) { + if (!DEBUG) + return; + appendFileSync(LOG, `${(/* @__PURE__ */ new Date()).toISOString()} [${tag}] ${msg} +`); +} + +// dist/src/hooks/summary-state.js +var dlog = (msg) => log("summary-state", msg); +var STATE_DIR = join2(homedir2(), ".claude", "hooks", "summary-state"); var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); function statePath(sessionId) { - return join(STATE_DIR, `${sessionId}.json`); + return join2(STATE_DIR, `${sessionId}.json`); } function lockPath(sessionId) { - return join(STATE_DIR, `${sessionId}.lock`); + return join2(STATE_DIR, `${sessionId}.lock`); } function readState(sessionId) { const p = statePath(sessionId); @@ -46,9 +62,11 @@ function withRmwLock(sessionId, fn) { if (e.code !== "EEXIST") throw e; if (Date.now() > deadline) { + dlog(`rmw lock deadline exceeded for ${sessionId}, reclaiming stale lock`); try { unlinkSync(rmwLock); - } catch { + } catch (unlinkErr) { + dlog(`stale rmw lock unlink failed for ${sessionId}: ${unlinkErr.message}`); } continue; } @@ -61,7 +79,8 @@ function withRmwLock(sessionId, fn) { closeSync(fd); try { unlinkSync(rmwLock); - } catch { + } catch (unlinkErr) { + dlog(`rmw lock cleanup failed for ${sessionId}: ${unlinkErr.message}`); } } } @@ -78,7 +97,10 @@ function finalizeSummary(sessionId, jsonlLines) { function releaseLock(sessionId) { try { unlinkSync(lockPath(sessionId)); - } catch { + } catch (e) { + if (e?.code !== "ENOENT") { + dlog(`releaseLock unlink failed for ${sessionId}: ${e.message}`); + } } } @@ -108,14 +130,15 @@ async function uploadSummary(query2, params) { } // dist/src/hooks/codex/wiki-worker.js +var dlog2 = (msg) => log("codex-wiki-worker", msg); var cfg = JSON.parse(readFileSync2(process.argv[2], "utf-8")); var tmpDir = cfg.tmpDir; -var tmpJsonl = join2(tmpDir, "session.jsonl"); -var tmpSummary = join2(tmpDir, "summary.md"); +var tmpJsonl = join3(tmpDir, 
"session.jsonl"); +var tmpSummary = join3(tmpDir, "summary.md"); function wlog(msg) { try { mkdirSync2(cfg.hooksDir, { recursive: true }); - appendFileSync(cfg.wikiLog, `[${(/* @__PURE__ */ new Date()).toISOString().replace("T", " ").slice(0, 19)}] wiki-worker(${cfg.sessionId}): ${msg} + appendFileSync2(cfg.wikiLog, `[${(/* @__PURE__ */ new Date()).toISOString().replace("T", " ").slice(0, 19)}] wiki-worker(${cfg.sessionId}): ${msg} `); } catch { } @@ -155,7 +178,8 @@ async function query(sql, retries = 4) { function cleanup() { try { rmSync(tmpDir, { recursive: true, force: true }); - } catch { + } catch (cleanupErr) { + dlog2(`cleanup failed to remove ${tmpDir}: ${cleanupErr.message}`); } } async function main() { @@ -234,7 +258,8 @@ async function main() { cleanup(); try { releaseLock(cfg.sessionId); - } catch { + } catch (releaseErr) { + dlog2(`releaseLock failed in finally for ${cfg.sessionId}: ${releaseErr.message}`); } } } diff --git a/src/hooks/capture.ts b/src/hooks/capture.ts index b199d49..2628bd5 100644 --- a/src/hooks/capture.ts +++ b/src/hooks/capture.ts @@ -172,7 +172,12 @@ function maybeTriggerPeriodicSummary(sessionId: string, cwd: string, config: Con reason: "Periodic", }); } catch (e: any) { - try { releaseLock(sessionId); } catch { /* ignore */ } + log(`periodic spawn failed: ${e.message}`); + try { + releaseLock(sessionId); + } catch (releaseErr: any) { + log(`releaseLock after periodic spawn failure also failed: ${releaseErr.message}`); + } throw e; } } catch (e: any) { diff --git a/src/hooks/codex/capture.ts b/src/hooks/codex/capture.ts index e749a6b..5414617 100644 --- a/src/hooks/codex/capture.ts +++ b/src/hooks/codex/capture.ts @@ -148,7 +148,12 @@ function maybeTriggerPeriodicSummary(sessionId: string, cwd: string, config: Con reason: "Periodic", }); } catch (e: any) { - try { releaseLock(sessionId); } catch { /* ignore */ } + log(`periodic spawn failed: ${e.message}`); + try { + releaseLock(sessionId); + } catch (releaseErr: any) { + 
log(`releaseLock after periodic spawn failure also failed: ${releaseErr.message}`); + } throw e; } } catch (e: any) { diff --git a/src/hooks/codex/stop.ts b/src/hooks/codex/stop.ts index 2b6c60d..8c2820e 100644 --- a/src/hooks/codex/stop.ts +++ b/src/hooks/codex/stop.ts @@ -142,7 +142,12 @@ async function main(): Promise { // Spawn threw before the worker took ownership of the lock: release // it here so a --resume can retrigger periodic summaries without // waiting for the 10-minute stale reclaim. - try { releaseLock(sessionId); } catch { /* ignore */ } + log(`spawn failed: ${e.message}`); + try { + releaseLock(sessionId); + } catch (releaseErr: any) { + log(`releaseLock after spawn failure also failed: ${releaseErr.message}`); + } throw e; } } diff --git a/src/hooks/codex/wiki-worker.ts b/src/hooks/codex/wiki-worker.ts index a7c50f8..7d74f75 100644 --- a/src/hooks/codex/wiki-worker.ts +++ b/src/hooks/codex/wiki-worker.ts @@ -12,6 +12,9 @@ import { execFileSync } from "node:child_process"; import { join } from "node:path"; import { finalizeSummary, releaseLock } from "../summary-state.js"; import { uploadSummary } from "../upload-summary.js"; +import { log as _log } from "../../utils/debug.js"; + +const dlog = (msg: string) => _log("codex-wiki-worker", msg); interface WorkerConfig { apiUrl: string; @@ -88,7 +91,11 @@ async function query(sql: string, retries = 4): Promise[ } function cleanup(): void { - try { rmSync(tmpDir, { recursive: true, force: true }); } catch { /* ignore */ } + try { + rmSync(tmpDir, { recursive: true, force: true }); + } catch (cleanupErr: any) { + dlog(`cleanup failed to remove ${tmpDir}: ${cleanupErr.message}`); + } } async function main(): Promise { @@ -196,7 +203,11 @@ async function main(): Promise { wlog(`fatal: ${e.message}`); } finally { cleanup(); - try { releaseLock(cfg.sessionId); } catch { /* ignore */ } + try { + releaseLock(cfg.sessionId); + } catch (releaseErr: any) { + dlog(`releaseLock failed in finally for 
${cfg.sessionId}: ${releaseErr.message}`); + } } } diff --git a/src/hooks/session-end.ts b/src/hooks/session-end.ts index 5e24bc3..751669a 100644 --- a/src/hooks/session-end.ts +++ b/src/hooks/session-end.ts @@ -55,7 +55,12 @@ async function main(): Promise { // Spawn threw before the worker took ownership of the lock: release // it here so a --resume can retrigger periodic summaries without // waiting for the 10-minute stale reclaim. - try { releaseLock(sessionId); } catch { /* ignore */ } + log(`spawn failed: ${e.message}`); + try { + releaseLock(sessionId); + } catch (releaseErr: any) { + log(`releaseLock after spawn failure also failed: ${releaseErr.message}`); + } throw e; } } diff --git a/src/hooks/summary-state.ts b/src/hooks/summary-state.ts index b10ba3e..9873e4f 100644 --- a/src/hooks/summary-state.ts +++ b/src/hooks/summary-state.ts @@ -14,6 +14,9 @@ import { } from "node:fs"; import { homedir } from "node:os"; import { join } from "node:path"; +import { log as _log } from "../utils/debug.js"; + +const dlog = (msg: string) => _log("summary-state", msg); export interface SummaryState { lastSummaryAt: number; @@ -61,7 +64,12 @@ export function withRmwLock(sessionId: string, fn: () => T): T { } catch (e: any) { if (e.code !== "EEXIST") throw e; if (Date.now() > deadline) { - try { unlinkSync(rmwLock); } catch { /* ignore */ } + dlog(`rmw lock deadline exceeded for ${sessionId}, reclaiming stale lock`); + try { + unlinkSync(rmwLock); + } catch (unlinkErr: any) { + dlog(`stale rmw lock unlink failed for ${sessionId}: ${unlinkErr.message}`); + } continue; } Atomics.wait(YIELD_BUF, 0, 0, 10); @@ -71,7 +79,11 @@ export function withRmwLock(sessionId: string, fn: () => T): T { return fn(); } finally { closeSync(fd); - try { unlinkSync(rmwLock); } catch { /* ignore */ } + try { + unlinkSync(rmwLock); + } catch (unlinkErr: any) { + dlog(`rmw lock cleanup failed for ${sessionId}: ${unlinkErr.message}`); + } } } @@ -129,8 +141,17 @@ export function 
tryAcquireLock(sessionId: string, maxAgeMs = 10 * 60 * 1000): bo try { const ageMs = Date.now() - parseInt(readFileSync(p, "utf-8"), 10); if (Number.isFinite(ageMs) && ageMs < maxAgeMs) return false; - } catch { /* treat unreadable as stale */ } - try { unlinkSync(p); } catch { return false; } + } catch (readErr: any) { + // Unreadable lock content: treat as stale and log for visibility + // (HIVEMIND_DEBUG-gated) so we know why stale reclaim fired. + dlog(`lock file unreadable for ${sessionId}, treating as stale: ${readErr.message}`); + } + try { + unlinkSync(p); + } catch (unlinkErr: any) { + dlog(`could not unlink stale lock for ${sessionId}: ${unlinkErr.message}`); + return false; + } } try { const fd = openSync(p, "wx"); @@ -145,5 +166,11 @@ export function tryAcquireLock(sessionId: string, maxAgeMs = 10 * 60 * 1000): bo export function releaseLock(sessionId: string): void { try { unlinkSync(lockPath(sessionId)); - } catch { /* ignore */ } + } catch (e: any) { + // ENOENT is normal (lock wasn't held); everything else is worth + // seeing in debug mode. 
+ if (e?.code !== "ENOENT") { + dlog(`releaseLock unlink failed for ${sessionId}: ${e.message}`); + } + } } diff --git a/src/hooks/wiki-worker.ts b/src/hooks/wiki-worker.ts index 6e12445..2359ea0 100644 --- a/src/hooks/wiki-worker.ts +++ b/src/hooks/wiki-worker.ts @@ -10,7 +10,9 @@ import { readFileSync, writeFileSync, existsSync, appendFileSync, mkdirSync, rmSync } from "node:fs"; import { execFileSync } from "node:child_process"; import { join } from "node:path"; -import { utcTimestamp } from "../utils/debug.js"; +import { utcTimestamp, log as _log } from "../utils/debug.js"; + +const dlog = (msg: string) => _log("wiki-worker", msg); import { finalizeSummary, releaseLock } from "./summary-state.js"; import { uploadSummary } from "./upload-summary.js"; @@ -91,7 +93,11 @@ async function query(sql: string, retries = 4): Promise[ } function cleanup(): void { - try { rmSync(tmpDir, { recursive: true, force: true }); } catch { /* ignore */ } + try { + rmSync(tmpDir, { recursive: true, force: true }); + } catch (cleanupErr: any) { + dlog(`cleanup failed to remove ${tmpDir}: ${cleanupErr.message}`); + } } async function main(): Promise { @@ -202,7 +208,13 @@ async function main(): Promise { wlog(`fatal: ${e.message}`); } finally { cleanup(); - try { releaseLock(cfg.sessionId); } catch { /* ignore */ } + try { + releaseLock(cfg.sessionId); + } catch (releaseErr: any) { + // Gated on HIVEMIND_DEBUG — we don't want a release failure at + // worker shutdown to pollute the wiki log every run. 
+ dlog(`releaseLock failed in finally for ${cfg.sessionId}: ${releaseErr.message}`); + } } } From bbc6df903ad62c7e3d7d7aa034381e2ceb0e2796 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 21:18:13 +0000 Subject: [PATCH 24/39] test(baseline_cloud): expand real-QA coverage to 5 QAs and add /sessions/* Read MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extends the integration test suite for fix #1 and fix #2 with two more QAs — qa_3 (Caroline's research) and qa_29 (Melanie's pottery workshop) — bringing the REAL_QAS pool to five. qa_3 specifically maps to the Read calls that fired in the `baseline_cloud_9qa_read_candidates_fix2` benchmark run (three Read calls, all against memory paths), so its inclusion anchors the test suite against live behavior observed on the sessions-only `locomo_benchmark/baseline` workspace. Adds a dedicated test for the other Read-tool regression surface: a Read against a /sessions/.json path (not only /index.md). The same benchmark run showed haiku calling `Read /home/.deeplake/memory/sessions/conv_0_session_{1,2}.json` directly; the new test feeds that exact shape through `processPreToolUse`, asserts the decision carries `file_path` (not `command`), and verifies the session JSON body is materialized to the read cache at the expected virtual path. Renames the test file from `pre-tool-use-baseline-cloud-3qa.test.ts` to `pre-tool-use-baseline-cloud.test.ts` now that it covers more than three QAs. Verification: 13 / 13 tests pass; temporarily stashing the fix #2 source change makes the new per-QA Read assertions and the /sessions Read assertion all fail (decision.file_path is undefined), restoring the source brings them back to green. 
--- ...ts => pre-tool-use-baseline-cloud.test.ts} | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) rename claude-code/tests/{pre-tool-use-baseline-cloud-3qa.test.ts => pre-tool-use-baseline-cloud.test.ts} (71%) diff --git a/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts b/claude-code/tests/pre-tool-use-baseline-cloud.test.ts similarity index 71% rename from claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts rename to claude-code/tests/pre-tool-use-baseline-cloud.test.ts index acce536..40e4ab4 100644 --- a/claude-code/tests/pre-tool-use-baseline-cloud-3qa.test.ts +++ b/claude-code/tests/pre-tool-use-baseline-cloud.test.ts @@ -54,6 +54,12 @@ if (SESSION_ROWS.length !== 272) { // fix. Each row is verbatim from the scored JSONL except `session_file` // which records the session we'd expect Claude to land on. const REAL_QAS = [ + { + name: "qa_3: Caroline's research (fix #2 smoke — real run did Read x3)", + question: "What did Caroline research?", + gold_answer: "Adoption agencies", + expected_session_file: "/sessions/conv_0_session_1.json", + }, { name: "qa_6: Melanie's camping plans", question: "When is Melanie planning on going camping?", @@ -66,6 +72,12 @@ const REAL_QAS = [ gold_answer: "10 July 2023", expected_session_file: "/sessions/conv_0_session_7.json", }, + { + name: "qa_29: Melanie's pottery workshop", + question: "When did Melanie go to the pottery workshop?", + gold_answer: "The Friday before 15 July 2023", + expected_session_file: "/sessions/conv_0_session_7.json", + }, { name: "qa_46: Melanie as an ally", question: "Would Melanie be considered an ally to the transgender community?", @@ -208,4 +220,74 @@ describe("baseline_cloud 3-QA regression: sessions-only workspace", () => { }); }); } + + // ── Regression coverage anchored in a real benchmark run ───────────── + // + // In `baseline_cloud_9qa_read_candidates_fix2` (2026-04-20), haiku chose + // to call the Read tool directly against session files — not just + // /index.md. 
Specifically, qa_3 did three Read calls including + // Read /home/.deeplake/memory/sessions/conv_0_session_1.json and + // Read /home/.deeplake/memory/sessions/conv_0_session_2.json, and all + // three succeeded (zero "path must be of type string" errors) after + // fix #2 landed. The previous run on the same workspace without the fix + // produced that error on every memory-path Read call. + // + // This test drives the same session-file Read through processPreToolUse + // and asserts the decision shape matches what Claude Code's Read tool + // expects — i.e. `updatedInput: {file_path}`, not `{command}`. + + it("Read /sessions/ intercept returns file_path pointing to the session content (qa_3 real-run path)", async () => { + const sessionJson = JSON.stringify({ + conversation_id: 0, + session_number: 1, + date_time: "8 May, 2023", + speakers: { speaker_a: "Caroline", speaker_b: "Melanie" }, + turns: [ + { speaker: "Caroline", dia_id: "D1:1", text: "Hey Mel! Good to see you!" }, + ], + }); + + const api = { + query: vi.fn(async (sql: string) => { + // Exact-path read hits the sessions table. 
+ if (/FROM\s+"sessions"/i.test(sql) && /conv_0_session_1\.json/.test(sql)) { + return [{ path: "/sessions/conv_0_session_1.json", content: sessionJson, source_order: 1 }]; + } + if (/FROM\s+"memory"/i.test(sql)) return []; + return []; + }), + } as any; + const capturedReadFiles: Array<{ sessionId: string; virtualPath: string; content: string }> = []; + + const decision = await processPreToolUse( + { + session_id: "s-qa3-session-read", + tool_name: "Read", + tool_input: { file_path: "~/.deeplake/memory/sessions/conv_0_session_1.json" }, + tool_use_id: "tu-read-session-1", + }, + { + config: BASE_CONFIG, + createApi: vi.fn(() => api), + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + readCachedIndexContentFn: () => null, + writeCachedIndexContentFn: () => undefined, + writeReadCacheFileFn: ((sessionId: string, virtualPath: string, content: string) => { + capturedReadFiles.push({ sessionId, virtualPath, content }); + return `/tmp/test-${sessionId}${virtualPath}`; + }) as any, + }, + ); + + // Read-tool shape: decision must carry file_path, not just command. + expect(decision).not.toBeNull(); + expect(decision?.file_path).toBe("/tmp/test-s-qa3-session-read/sessions/conv_0_session_1.json"); + + // Content materialized exactly once, at the right virtual path, with + // the real session payload Claude needs to answer qa_3. 
+ expect(capturedReadFiles).toHaveLength(1); + expect(capturedReadFiles[0]?.virtualPath).toBe("/sessions/conv_0_session_1.json"); + expect(capturedReadFiles[0]?.content).toContain("Caroline"); + expect(capturedReadFiles[0]?.content).toContain("8 May, 2023"); + }); }); From f865633c9cbf78127c818be037553251c38c220a Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 21:21:05 +0000 Subject: [PATCH 25/39] refactor: extract duplicated helpers into src/utils/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three helpers were cut-and-pasted across the CC + Codex session-start surface and the capture hooks. Extracted into dedicated modules so they have one place to change and one place to test: - src/utils/version-check.ts — getInstalledVersion, getLatestVersion, isNewer. Previously duplicated across session-start.ts, session-start-setup.ts and their codex twins. The CC variant reads .claude-plugin/plugin.json, the Codex variant reads .codex-plugin/plugin.json; callers now pass the manifest dir as a parameter. - src/utils/wiki-log.ts — makeWikiLogger factory. Four files had an identical wikiLog body differing only by the ~/.claude vs ~/.codex hook dir. spawn-wiki-worker.ts (CC + Codex) and the two session-start files now take the logger from the factory. The log path is unchanged and CC log lines are byte-identical; Codex log lines now carry CC's " UTC" timestamp suffix, which the old Codex wikiLog omitted. - src/utils/session-path.ts — buildSessionPath. Shared by capture.ts, codex/capture.ts and codex/stop.ts; the copies differed in one spot — the Codex variants interpolated config.workspaceId directly, while the shared helper keeps CC's `config.workspaceId ?? "default"` fallback, so a Codex session with an unset workspaceId now writes to a `..._default_...` path instead of `..._undefined_...`. No behavior change beyond the two Codex deltas called out above: typecheck + 657 tests + build all clean. Removed now-unused imports (readFileSync, mkdirSync, appendFileSync, utcTimestamp) from the consumer files.
--- claude-code/bundle/capture.js | 60 ++++++----- claude-code/bundle/session-end.js | 58 +++++++---- claude-code/bundle/session-start-setup.js | 55 ++++++---- claude-code/bundle/session-start.js | 119 ++++++++++++---------- codex/bundle/capture.js | 58 +++++++---- codex/bundle/session-start-setup.js | 58 +++++++---- codex/bundle/session-start.js | 37 +++---- codex/bundle/stop.js | 68 ++++++++----- src/hooks/capture.ts | 11 +- src/hooks/codex/capture.ts | 5 +- src/hooks/codex/session-start-setup.ts | 57 +---------- src/hooks/codex/session-start.ts | 24 +---- src/hooks/codex/spawn-wiki-worker.ts | 13 +-- src/hooks/codex/stop.ts | 5 +- src/hooks/session-start-setup.ts | 59 +---------- src/hooks/session-start.ts | 63 ++---------- src/hooks/spawn-wiki-worker.ts | 14 +-- src/utils/session-path.ts | 13 +++ src/utils/version-check.ts | 67 ++++++++++++ src/utils/wiki-log.ts | 32 ++++++ 20 files changed, 453 insertions(+), 423 deletions(-) create mode 100644 src/utils/session-path.ts create mode 100644 src/utils/version-check.ts create mode 100644 src/utils/wiki-log.ts diff --git a/claude-code/bundle/capture.js b/claude-code/bundle/capture.js index c207476..3b5a215 100755 --- a/claude-code/bundle/capture.js +++ b/claude-code/bundle/capture.js @@ -302,6 +302,12 @@ var DeeplakeApi = class { } }; +// dist/src/utils/session-path.js +function buildSessionPath(config, sessionId) { + const workspace = config.workspaceId ?? 
"default"; + return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${workspace}_${sessionId}.jsonl`; +} + // dist/src/hooks/summary-state.js import { readFileSync as readFileSync2, writeFileSync, writeSync, mkdirSync, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir3 } from "node:os"; @@ -439,11 +445,32 @@ function releaseLock(sessionId) { // dist/src/hooks/spawn-wiki-worker.js import { spawn, execSync } from "node:child_process"; import { fileURLToPath } from "node:url"; -import { dirname, join as join4 } from "node:path"; -import { writeFileSync as writeFileSync2, mkdirSync as mkdirSync2, appendFileSync as appendFileSync2 } from "node:fs"; +import { dirname, join as join5 } from "node:path"; +import { writeFileSync as writeFileSync2, mkdirSync as mkdirSync3 } from "node:fs"; import { homedir as homedir4, tmpdir } from "node:os"; + +// dist/src/utils/wiki-log.js +import { mkdirSync as mkdirSync2, appendFileSync as appendFileSync2 } from "node:fs"; +import { join as join4 } from "node:path"; +function makeWikiLogger(hooksDir, filename = "deeplake-wiki.log") { + const path = join4(hooksDir, filename); + return { + path, + log(msg) { + try { + mkdirSync2(hooksDir, { recursive: true }); + appendFileSync2(path, `[${utcTimestamp()}] ${msg} +`); + } catch { + } + } + }; +} + +// dist/src/hooks/spawn-wiki-worker.js var HOME = homedir4(); -var WIKI_LOG = join4(HOME, ".claude", "hooks", "deeplake-wiki.log"); +var wikiLogger = makeWikiLogger(join5(HOME, ".claude", "hooks")); +var WIKI_LOG = wikiLogger.path; var WIKI_PROMPT_TEMPLATE = `You are building a personal wiki from a coding session. Your goal is to extract every piece of knowledge \u2014 entities, decisions, relationships, and facts \u2014 into a structured, searchable wiki entry. Think of this as building a knowledge graph, not writing a summary. SESSION JSONL path: __JSONL__ @@ -496,27 +523,20 @@ IMPORTANT: Be exhaustive. 
Extract EVERY entity, decision, and fact. Future you w PRIVACY: Never include absolute filesystem paths (e.g. /home/user/..., /Users/..., C:\\\\...) in the summary. Use only project-relative paths or the project name. The Source and Project fields above are already correct \u2014 do not change them. LENGTH LIMIT: Keep the total summary under 4000 characters. Be dense and concise \u2014 prioritize facts over prose. If a session is short, the summary should be short too.`; -function wikiLog(msg) { - try { - mkdirSync2(join4(HOME, ".claude", "hooks"), { recursive: true }); - appendFileSync2(WIKI_LOG, `[${utcTimestamp()}] ${msg} -`); - } catch { - } -} +var wikiLog = wikiLogger.log; function findClaudeBin() { try { return execSync("which claude 2>/dev/null", { encoding: "utf-8" }).trim(); } catch { - return join4(HOME, ".claude", "local", "claude"); + return join5(HOME, ".claude", "local", "claude"); } } function spawnWikiWorker(opts) { const { config, sessionId, cwd, bundleDir, reason } = opts; const projectName = cwd.split("/").pop() || "unknown"; - const tmpDir = join4(tmpdir(), `deeplake-wiki-${sessionId}-${Date.now()}`); - mkdirSync2(tmpDir, { recursive: true }); - const configFile = join4(tmpDir, "config.json"); + const tmpDir = join5(tmpdir(), `deeplake-wiki-${sessionId}-${Date.now()}`); + mkdirSync3(tmpDir, { recursive: true }); + const configFile = join5(tmpDir, "config.json"); writeFileSync2(configFile, JSON.stringify({ apiUrl: config.apiUrl, token: config.token, @@ -530,11 +550,11 @@ function spawnWikiWorker(opts) { tmpDir, claudeBin: findClaudeBin(), wikiLog: WIKI_LOG, - hooksDir: join4(HOME, ".claude", "hooks"), + hooksDir: join5(HOME, ".claude", "hooks"), promptTemplate: WIKI_PROMPT_TEMPLATE })); wikiLog(`${reason}: spawning summary worker for ${sessionId}`); - const workerPath = join4(bundleDir, "wiki-worker.js"); + const workerPath = join5(bundleDir, "wiki-worker.js"); spawn("nohup", ["node", workerPath, configFile], { detached: true, stdio: ["ignore", 
"ignore", "ignore"] @@ -548,12 +568,6 @@ function bundleDirFromImportMeta(importMetaUrl) { // dist/src/hooks/capture.js var log3 = (msg) => log("capture", msg); var CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; -function buildSessionPath(config, sessionId) { - const userName = config.userName; - const orgName = config.orgName; - const workspace = config.workspaceId ?? "default"; - return `/sessions/${userName}/${userName}_${orgName}_${workspace}_${sessionId}.jsonl`; -} async function main() { if (!CAPTURE) return; diff --git a/claude-code/bundle/session-end.js b/claude-code/bundle/session-end.js index 77caa34..c10f5db 100755 --- a/claude-code/bundle/session-end.js +++ b/claude-code/bundle/session-end.js @@ -72,11 +72,32 @@ function log(tag, msg) { // dist/src/hooks/spawn-wiki-worker.js import { spawn, execSync } from "node:child_process"; import { fileURLToPath } from "node:url"; -import { dirname, join as join3 } from "node:path"; -import { writeFileSync, mkdirSync, appendFileSync as appendFileSync2 } from "node:fs"; +import { dirname, join as join4 } from "node:path"; +import { writeFileSync, mkdirSync as mkdirSync2 } from "node:fs"; import { homedir as homedir3, tmpdir } from "node:os"; + +// dist/src/utils/wiki-log.js +import { mkdirSync, appendFileSync as appendFileSync2 } from "node:fs"; +import { join as join3 } from "node:path"; +function makeWikiLogger(hooksDir, filename = "deeplake-wiki.log") { + const path = join3(hooksDir, filename); + return { + path, + log(msg) { + try { + mkdirSync(hooksDir, { recursive: true }); + appendFileSync2(path, `[${utcTimestamp()}] ${msg} +`); + } catch { + } + } + }; +} + +// dist/src/hooks/spawn-wiki-worker.js var HOME = homedir3(); -var WIKI_LOG = join3(HOME, ".claude", "hooks", "deeplake-wiki.log"); +var wikiLogger = makeWikiLogger(join4(HOME, ".claude", "hooks")); +var WIKI_LOG = wikiLogger.path; var WIKI_PROMPT_TEMPLATE = `You are building a personal wiki from a coding session. 
Your goal is to extract every piece of knowledge \u2014 entities, decisions, relationships, and facts \u2014 into a structured, searchable wiki entry. Think of this as building a knowledge graph, not writing a summary. SESSION JSONL path: __JSONL__ @@ -129,27 +150,20 @@ IMPORTANT: Be exhaustive. Extract EVERY entity, decision, and fact. Future you w PRIVACY: Never include absolute filesystem paths (e.g. /home/user/..., /Users/..., C:\\\\...) in the summary. Use only project-relative paths or the project name. The Source and Project fields above are already correct \u2014 do not change them. LENGTH LIMIT: Keep the total summary under 4000 characters. Be dense and concise \u2014 prioritize facts over prose. If a session is short, the summary should be short too.`; -function wikiLog(msg) { - try { - mkdirSync(join3(HOME, ".claude", "hooks"), { recursive: true }); - appendFileSync2(WIKI_LOG, `[${utcTimestamp()}] ${msg} -`); - } catch { - } -} +var wikiLog = wikiLogger.log; function findClaudeBin() { try { return execSync("which claude 2>/dev/null", { encoding: "utf-8" }).trim(); } catch { - return join3(HOME, ".claude", "local", "claude"); + return join4(HOME, ".claude", "local", "claude"); } } function spawnWikiWorker(opts) { const { config, sessionId, cwd, bundleDir, reason } = opts; const projectName = cwd.split("/").pop() || "unknown"; - const tmpDir = join3(tmpdir(), `deeplake-wiki-${sessionId}-${Date.now()}`); - mkdirSync(tmpDir, { recursive: true }); - const configFile = join3(tmpDir, "config.json"); + const tmpDir = join4(tmpdir(), `deeplake-wiki-${sessionId}-${Date.now()}`); + mkdirSync2(tmpDir, { recursive: true }); + const configFile = join4(tmpDir, "config.json"); writeFileSync(configFile, JSON.stringify({ apiUrl: config.apiUrl, token: config.token, @@ -163,11 +177,11 @@ function spawnWikiWorker(opts) { tmpDir, claudeBin: findClaudeBin(), wikiLog: WIKI_LOG, - hooksDir: join3(HOME, ".claude", "hooks"), + hooksDir: join4(HOME, ".claude", "hooks"), 
promptTemplate: WIKI_PROMPT_TEMPLATE })); wikiLog(`${reason}: spawning summary worker for ${sessionId}`); - const workerPath = join3(bundleDir, "wiki-worker.js"); + const workerPath = join4(bundleDir, "wiki-worker.js"); spawn("nohup", ["node", workerPath, configFile], { detached: true, stdio: ["ignore", "ignore", "ignore"] @@ -179,17 +193,17 @@ function bundleDirFromImportMeta(importMetaUrl) { } // dist/src/hooks/summary-state.js -import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, writeSync, mkdirSync as mkdirSync2, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; +import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, writeSync, mkdirSync as mkdirSync3, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir4 } from "node:os"; -import { join as join4 } from "node:path"; +import { join as join5 } from "node:path"; var dlog = (msg) => log("summary-state", msg); -var STATE_DIR = join4(homedir4(), ".claude", "hooks", "summary-state"); +var STATE_DIR = join5(homedir4(), ".claude", "hooks", "summary-state"); var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); function lockPath(sessionId) { - return join4(STATE_DIR, `${sessionId}.lock`); + return join5(STATE_DIR, `${sessionId}.lock`); } function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { - mkdirSync2(STATE_DIR, { recursive: true }); + mkdirSync3(STATE_DIR, { recursive: true }); const p = lockPath(sessionId); if (existsSync2(p)) { try { diff --git a/claude-code/bundle/session-start-setup.js b/claude-code/bundle/session-start-setup.js index 09c2cff..bec63e9 100755 --- a/claude-code/bundle/session-start-setup.js +++ b/claude-code/bundle/session-start-setup.js @@ -2,8 +2,7 @@ // dist/src/hooks/session-start-setup.js import { fileURLToPath } from "node:url"; -import { dirname, join as join4 } from "node:path"; -import { mkdirSync as mkdirSync2, appendFileSync as 
appendFileSync2, readFileSync as readFileSync3 } from "node:fs"; +import { dirname as dirname2, join as join6 } from "node:path"; import { execSync as execSync2 } from "node:child_process"; import { homedir as homedir4 } from "node:os"; @@ -331,30 +330,19 @@ function readStdin() { }); } -// dist/src/hooks/session-start-setup.js -var log3 = (msg) => log("session-setup", msg); -var __bundleDir = dirname(fileURLToPath(import.meta.url)); +// dist/src/utils/version-check.js +import { readFileSync as readFileSync3 } from "node:fs"; +import { dirname, join as join4 } from "node:path"; var GITHUB_RAW_PKG = "https://raw.githubusercontent.com/activeloopai/hivemind/main/package.json"; -var VERSION_CHECK_TIMEOUT = 3e3; -var HOME = homedir4(); -var WIKI_LOG = join4(HOME, ".claude", "hooks", "deeplake-wiki.log"); -function wikiLog(msg) { +function getInstalledVersion(bundleDir, pluginManifestDir) { try { - mkdirSync2(join4(HOME, ".claude", "hooks"), { recursive: true }); - appendFileSync2(WIKI_LOG, `[${utcTimestamp()}] ${msg} -`); - } catch { - } -} -function getInstalledVersion() { - try { - const pluginJson = join4(__bundleDir, "..", ".claude-plugin", "plugin.json"); + const pluginJson = join4(bundleDir, "..", pluginManifestDir, "plugin.json"); const plugin = JSON.parse(readFileSync3(pluginJson, "utf-8")); if (plugin.version) return plugin.version; } catch { } - let dir = __bundleDir; + let dir = bundleDir; for (let i = 0; i < 5; i++) { const candidate = join4(dir, "package.json"); try { @@ -370,9 +358,9 @@ function getInstalledVersion() { } return null; } -async function getLatestVersion() { +async function getLatestVersion(timeoutMs = 3e3) { try { - const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(VERSION_CHECK_TIMEOUT) }); + const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(timeoutMs) }); if (!res.ok) return null; const pkg = await res.json(); @@ -387,6 +375,29 @@ function isNewer(latest, current) { const [ca, cb, cc] = 
parse(current); return la > ca || la === ca && lb > cb || la === ca && lb === cb && lc > cc; } + +// dist/src/utils/wiki-log.js +import { mkdirSync as mkdirSync2, appendFileSync as appendFileSync2 } from "node:fs"; +import { join as join5 } from "node:path"; +function makeWikiLogger(hooksDir, filename = "deeplake-wiki.log") { + const path = join5(hooksDir, filename); + return { + path, + log(msg) { + try { + mkdirSync2(hooksDir, { recursive: true }); + appendFileSync2(path, `[${utcTimestamp()}] ${msg} +`); + } catch { + } + } + }; +} + +// dist/src/hooks/session-start-setup.js +var log3 = (msg) => log("session-setup", msg); +var __bundleDir = dirname2(fileURLToPath(import.meta.url)); +var { log: wikiLog } = makeWikiLogger(join6(homedir4(), ".claude", "hooks")); async function main() { if (process.env.HIVEMIND_WIKI_WORKER === "1") return; @@ -421,7 +432,7 @@ async function main() { } const autoupdate = creds.autoupdate !== false; try { - const current = getInstalledVersion(); + const current = getInstalledVersion(__bundleDir, ".claude-plugin"); if (current) { const latest = await getLatestVersion(); if (latest && isNewer(latest, current)) { diff --git a/claude-code/bundle/session-start.js b/claude-code/bundle/session-start.js index 765666d..f136de0 100755 --- a/claude-code/bundle/session-start.js +++ b/claude-code/bundle/session-start.js @@ -2,8 +2,8 @@ // dist/src/hooks/session-start.js import { fileURLToPath } from "node:url"; -import { dirname, join as join4 } from "node:path"; -import { mkdirSync as mkdirSync2, appendFileSync as appendFileSync2, readFileSync as readFileSync3, readdirSync, rmSync } from "node:fs"; +import { dirname as dirname2, join as join6 } from "node:path"; +import { readdirSync, rmSync } from "node:fs"; import { execSync as execSync2 } from "node:child_process"; import { homedir as homedir4 } from "node:os"; @@ -331,51 +331,19 @@ function readStdin() { }); } -// dist/src/hooks/session-start.js -var log3 = (msg) => log("session-start", msg); 
-var __bundleDir = dirname(fileURLToPath(import.meta.url)); -var AUTH_CMD = join4(__bundleDir, "commands", "auth-login.js"); -var context = `DEEPLAKE MEMORY: You have TWO memory sources. ALWAYS check BOTH when the user asks you to recall, remember, or look up ANY information: - -1. Your built-in memory (~/.claude/) \u2014 personal per-project notes -2. Deeplake global memory (~/.deeplake/memory/) \u2014 global memory shared across all sessions, users, and agents in the org - -Deeplake memory structure: -- ~/.deeplake/memory/index.md \u2014 START HERE, table of all sessions -- ~/.deeplake/memory/summaries/username/*.md \u2014 AI-generated wiki summaries per session -- ~/.deeplake/memory/sessions/username/*.jsonl \u2014 raw session data (last resort) - -SEARCH STRATEGY: Always read index.md first. Then read specific summaries. Only read raw JSONL if summaries don't have enough detail. Do NOT jump straight to JSONL files. - -Search command: Grep pattern="keyword" path="~/.deeplake/memory" - -Organization management \u2014 each argument is SEPARATE (do NOT quote subcommands together): -- node "HIVEMIND_AUTH_CMD" login \u2014 SSO login -- node "HIVEMIND_AUTH_CMD" whoami \u2014 show current user/org -- node "HIVEMIND_AUTH_CMD" org list \u2014 list organizations -- node "HIVEMIND_AUTH_CMD" org switch \u2014 switch organization -- node "HIVEMIND_AUTH_CMD" workspaces \u2014 list workspaces -- node "HIVEMIND_AUTH_CMD" workspace \u2014 switch workspace -- node "HIVEMIND_AUTH_CMD" invite \u2014 invite member (ALWAYS ask user which role before inviting) -- node "HIVEMIND_AUTH_CMD" members \u2014 list members -- node "HIVEMIND_AUTH_CMD" remove \u2014 remove member - -IMPORTANT: Only use bash commands (cat, ls, grep, echo, jq, head, tail, etc.) to interact with ~/.deeplake/memory/. Do NOT use python, python3, node, curl, or other interpreters \u2014 they are not available in the memory filesystem. 
If a task seems to require Python, rewrite it using bash commands and standard text-processing tools (awk, sed, jq, grep, etc.). - -LIMITS: Do NOT spawn subagents to read deeplake memory. If a file returns empty after 2 attempts, skip it and move on. Report what you found rather than exhaustively retrying. - -Debugging: Set HIVEMIND_DEBUG=1 to enable verbose logging to ~/.deeplake/hook-debug.log`; +// dist/src/utils/version-check.js +import { readFileSync as readFileSync3 } from "node:fs"; +import { dirname, join as join4 } from "node:path"; var GITHUB_RAW_PKG = "https://raw.githubusercontent.com/activeloopai/hivemind/main/package.json"; -var VERSION_CHECK_TIMEOUT = 3e3; -function getInstalledVersion() { +function getInstalledVersion(bundleDir, pluginManifestDir) { try { - const pluginJson = join4(__bundleDir, "..", ".claude-plugin", "plugin.json"); + const pluginJson = join4(bundleDir, "..", pluginManifestDir, "plugin.json"); const plugin = JSON.parse(readFileSync3(pluginJson, "utf-8")); if (plugin.version) return plugin.version; } catch { } - let dir = __bundleDir; + let dir = bundleDir; for (let i = 0; i < 5; i++) { const candidate = join4(dir, "package.json"); try { @@ -391,9 +359,9 @@ function getInstalledVersion() { } return null; } -async function getLatestVersion() { +async function getLatestVersion(timeoutMs = 3e3) { try { - const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(VERSION_CHECK_TIMEOUT) }); + const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(timeoutMs) }); if (!res.ok) return null; const pkg = await res.json(); @@ -408,16 +376,61 @@ function isNewer(latest, current) { const [ca, cb, cc] = parse(current); return la > ca || la === ca && lb > cb || la === ca && lb === cb && lc > cc; } -var HOME = homedir4(); -var WIKI_LOG = join4(HOME, ".claude", "hooks", "deeplake-wiki.log"); -function wikiLog(msg) { - try { - mkdirSync2(join4(HOME, ".claude", "hooks"), { recursive: true }); - appendFileSync2(WIKI_LOG, 
`[${utcTimestamp()}] ${msg} + +// dist/src/utils/wiki-log.js +import { mkdirSync as mkdirSync2, appendFileSync as appendFileSync2 } from "node:fs"; +import { join as join5 } from "node:path"; +function makeWikiLogger(hooksDir, filename = "deeplake-wiki.log") { + const path = join5(hooksDir, filename); + return { + path, + log(msg) { + try { + mkdirSync2(hooksDir, { recursive: true }); + appendFileSync2(path, `[${utcTimestamp()}] ${msg} `); - } catch { - } + } catch { + } + } + }; } + +// dist/src/hooks/session-start.js +var log3 = (msg) => log("session-start", msg); +var __bundleDir = dirname2(fileURLToPath(import.meta.url)); +var AUTH_CMD = join6(__bundleDir, "commands", "auth-login.js"); +var context = `DEEPLAKE MEMORY: You have TWO memory sources. ALWAYS check BOTH when the user asks you to recall, remember, or look up ANY information: + +1. Your built-in memory (~/.claude/) \u2014 personal per-project notes +2. Deeplake global memory (~/.deeplake/memory/) \u2014 global memory shared across all sessions, users, and agents in the org + +Deeplake memory structure: +- ~/.deeplake/memory/index.md \u2014 START HERE, table of all sessions +- ~/.deeplake/memory/summaries/username/*.md \u2014 AI-generated wiki summaries per session +- ~/.deeplake/memory/sessions/username/*.jsonl \u2014 raw session data (last resort) + +SEARCH STRATEGY: Always read index.md first. Then read specific summaries. Only read raw JSONL if summaries don't have enough detail. Do NOT jump straight to JSONL files. 
+ +Search command: Grep pattern="keyword" path="~/.deeplake/memory" + +Organization management \u2014 each argument is SEPARATE (do NOT quote subcommands together): +- node "HIVEMIND_AUTH_CMD" login \u2014 SSO login +- node "HIVEMIND_AUTH_CMD" whoami \u2014 show current user/org +- node "HIVEMIND_AUTH_CMD" org list \u2014 list organizations +- node "HIVEMIND_AUTH_CMD" org switch \u2014 switch organization +- node "HIVEMIND_AUTH_CMD" workspaces \u2014 list workspaces +- node "HIVEMIND_AUTH_CMD" workspace \u2014 switch workspace +- node "HIVEMIND_AUTH_CMD" invite \u2014 invite member (ALWAYS ask user which role before inviting) +- node "HIVEMIND_AUTH_CMD" members \u2014 list members +- node "HIVEMIND_AUTH_CMD" remove \u2014 remove member + +IMPORTANT: Only use bash commands (cat, ls, grep, echo, jq, head, tail, etc.) to interact with ~/.deeplake/memory/. Do NOT use python, python3, node, curl, or other interpreters \u2014 they are not available in the memory filesystem. If a task seems to require Python, rewrite it using bash commands and standard text-processing tools (awk, sed, jq, grep, etc.). + +LIMITS: Do NOT spawn subagents to read deeplake memory. If a file returns empty after 2 attempts, skip it and move on. Report what you found rather than exhaustively retrying. 
+ +Debugging: Set HIVEMIND_DEBUG=1 to enable verbose logging to ~/.deeplake/hook-debug.log`; +var HOME = homedir4(); +var { log: wikiLog } = makeWikiLogger(join6(HOME, ".claude", "hooks")); async function createPlaceholder(api, table, sessionId, cwd, userName, orgName, workspaceId) { const summaryPath = `/summaries/${userName}/${sessionId}.md`; const existing = await api.query(`SELECT path FROM "${table}" WHERE path = '${sqlStr(summaryPath)}' LIMIT 1`); @@ -484,7 +497,7 @@ async function main() { const autoupdate = creds?.autoupdate !== false; let updateNotice = ""; try { - const current = getInstalledVersion(); + const current = getInstalledVersion(__bundleDir, ".claude-plugin"); if (current) { const latest = await getLatestVersion(); if (latest && isNewer(latest, current)) { @@ -495,11 +508,11 @@ async function main() { const cmd = scopes.map((s) => `claude plugin update hivemind@hivemind --scope ${s} 2>/dev/null || true`).join("; "); execSync2(cmd, { stdio: "ignore", timeout: 6e4 }); try { - const cacheParent = join4(homedir4(), ".claude", "plugins", "cache", "hivemind", "hivemind"); + const cacheParent = join6(homedir4(), ".claude", "plugins", "cache", "hivemind", "hivemind"); const entries = readdirSync(cacheParent, { withFileTypes: true }); for (const e of entries) { if (e.isDirectory() && e.name !== latest) { - rmSync(join4(cacheParent, e.name), { recursive: true, force: true }); + rmSync(join6(cacheParent, e.name), { recursive: true, force: true }); log3(`cache cleanup: removed old version ${e.name}`); } } diff --git a/codex/bundle/capture.js b/codex/bundle/capture.js index 719205e..b449e10 100755 --- a/codex/bundle/capture.js +++ b/codex/bundle/capture.js @@ -62,6 +62,9 @@ import { join as join2 } from "node:path"; import { homedir as homedir2 } from "node:os"; var DEBUG = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; var LOG = join2(homedir2(), ".deeplake", "hook-debug.log"); +function utcTimestamp(d = /* @__PURE__ */ new Date()) { + return d.toISOString().replace("T", " ").slice(0, 19) + " UTC"; +} function log(tag, msg) { if (!DEBUG) return; @@ -299,6 +302,12 @@ var DeeplakeApi = class { } }; +// dist/src/utils/session-path.js +function buildSessionPath(config, sessionId) { + const workspace = config.workspaceId ?? "default"; + return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${workspace}_${sessionId}.jsonl`; +} + // dist/src/hooks/summary-state.js import { readFileSync as readFileSync2, writeFileSync, writeSync, mkdirSync, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir3 } from "node:os"; @@ -436,11 +445,32 @@ function releaseLock(sessionId) { // dist/src/hooks/codex/spawn-wiki-worker.js import { spawn, execSync } from "node:child_process"; import { fileURLToPath } from "node:url"; -import { dirname, join as join4 } from "node:path"; -import { writeFileSync as writeFileSync2, mkdirSync as mkdirSync2, appendFileSync as appendFileSync2 } from "node:fs"; +import { dirname, join as join5 } from "node:path"; +import { writeFileSync as writeFileSync2, mkdirSync as mkdirSync3 } from "node:fs"; import { homedir as homedir4, tmpdir } from "node:os"; + +// dist/src/utils/wiki-log.js +import { mkdirSync as mkdirSync2, appendFileSync as appendFileSync2 } from "node:fs"; +import { join as join4 } from "node:path"; +function makeWikiLogger(hooksDir, filename = "deeplake-wiki.log") { + const path = join4(hooksDir, filename); + return { + path, + log(msg) { + try { + mkdirSync2(hooksDir, { recursive: true }); + appendFileSync2(path, `[${utcTimestamp()}] ${msg} +`); + } catch { + } + } + }; +} + +// dist/src/hooks/codex/spawn-wiki-worker.js var HOME = homedir4(); -var WIKI_LOG = join4(HOME, ".codex", "hooks", "deeplake-wiki.log"); +var wikiLogger = 
makeWikiLogger(join5(HOME, ".codex", "hooks")); +var WIKI_LOG = wikiLogger.path; var WIKI_PROMPT_TEMPLATE = `You are building a personal wiki from a coding session. Your goal is to extract every piece of knowledge \u2014 entities, decisions, relationships, and facts \u2014 into a structured, searchable wiki entry. SESSION JSONL path: __JSONL__ @@ -490,14 +520,7 @@ Format: **entity** (type) \u2014 what was done with it, its current state> IMPORTANT: Be exhaustive. Extract EVERY entity, decision, and fact. PRIVACY: Never include absolute filesystem paths in the summary. LENGTH LIMIT: Keep the total summary under 4000 characters.`; -function wikiLog(msg) { - try { - mkdirSync2(join4(HOME, ".codex", "hooks"), { recursive: true }); - appendFileSync2(WIKI_LOG, `[${(/* @__PURE__ */ new Date()).toISOString().replace("T", " ").slice(0, 19)}] ${msg} -`); - } catch { - } -} +var wikiLog = wikiLogger.log; function findCodexBin() { try { return execSync("which codex 2>/dev/null", { encoding: "utf-8" }).trim(); @@ -508,9 +531,9 @@ function findCodexBin() { function spawnCodexWikiWorker(opts) { const { config, sessionId, cwd, bundleDir, reason } = opts; const projectName = cwd.split("/").pop() || "unknown"; - const tmpDir = join4(tmpdir(), `deeplake-wiki-${sessionId}-${Date.now()}`); - mkdirSync2(tmpDir, { recursive: true }); - const configFile = join4(tmpDir, "config.json"); + const tmpDir = join5(tmpdir(), `deeplake-wiki-${sessionId}-${Date.now()}`); + mkdirSync3(tmpDir, { recursive: true }); + const configFile = join5(tmpDir, "config.json"); writeFileSync2(configFile, JSON.stringify({ apiUrl: config.apiUrl, token: config.token, @@ -524,11 +547,11 @@ function spawnCodexWikiWorker(opts) { tmpDir, codexBin: findCodexBin(), wikiLog: WIKI_LOG, - hooksDir: join4(HOME, ".codex", "hooks"), + hooksDir: join5(HOME, ".codex", "hooks"), promptTemplate: WIKI_PROMPT_TEMPLATE })); wikiLog(`${reason}: spawning summary worker for ${sessionId}`); - const workerPath = join4(bundleDir, 
"wiki-worker.js"); + const workerPath = join5(bundleDir, "wiki-worker.js"); spawn("nohup", ["node", workerPath, configFile], { detached: true, stdio: ["ignore", "ignore", "ignore"] @@ -542,9 +565,6 @@ function bundleDirFromImportMeta(importMetaUrl) { // dist/src/hooks/codex/capture.js var log3 = (msg) => log("codex-capture", msg); var CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; -function buildSessionPath(config, sessionId) { - return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${config.workspaceId}_${sessionId}.jsonl`; -} async function main() { if (!CAPTURE) return; diff --git a/codex/bundle/session-start-setup.js b/codex/bundle/session-start-setup.js index 461393b..02be970 100755 --- a/codex/bundle/session-start-setup.js +++ b/codex/bundle/session-start-setup.js @@ -2,8 +2,7 @@ // dist/src/hooks/codex/session-start-setup.js import { fileURLToPath } from "node:url"; -import { dirname, join as join4 } from "node:path"; -import { mkdirSync as mkdirSync2, appendFileSync as appendFileSync2, readFileSync as readFileSync3 } from "node:fs"; +import { dirname as dirname2, join as join6 } from "node:path"; import { execSync as execSync2 } from "node:child_process"; import { homedir as homedir4 } from "node:os"; @@ -74,6 +73,9 @@ import { join as join3 } from "node:path"; import { homedir as homedir3 } from "node:os"; var DEBUG = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; var LOG = join3(homedir3(), ".deeplake", "hook-debug.log"); +function utcTimestamp(d = /* @__PURE__ */ new Date()) { + return d.toISOString().replace("T", " ").slice(0, 19) + " UTC"; +} function log(tag, msg) { if (!DEBUG) return; @@ -328,30 +330,19 @@ function readStdin() { }); } -// dist/src/hooks/codex/session-start-setup.js -var log3 = (msg) => log("codex-session-setup", msg); -var __bundleDir = dirname(fileURLToPath(import.meta.url)); +// dist/src/utils/version-check.js +import { readFileSync as readFileSync3 } from "node:fs"; +import { dirname, join as join4 } from "node:path"; var GITHUB_RAW_PKG = "https://raw.githubusercontent.com/activeloopai/hivemind/main/package.json"; -var VERSION_CHECK_TIMEOUT = 3e3; -var HOME = homedir4(); -var WIKI_LOG = join4(HOME, ".codex", "hooks", "deeplake-wiki.log"); -function wikiLog(msg) { +function getInstalledVersion(bundleDir, pluginManifestDir) { try { - mkdirSync2(join4(HOME, ".codex", "hooks"), { recursive: true }); - appendFileSync2(WIKI_LOG, `[${(/* @__PURE__ */ new Date()).toISOString().replace("T", " ").slice(0, 19)}] ${msg} -`); - } catch { - } -} -function getInstalledVersion() { - try { - const pluginJson = join4(__bundleDir, "..", ".codex-plugin", "plugin.json"); + const pluginJson = join4(bundleDir, "..", pluginManifestDir, "plugin.json"); const plugin = JSON.parse(readFileSync3(pluginJson, "utf-8")); if (plugin.version) return plugin.version; } catch { } - let dir = __bundleDir; + let dir = bundleDir; for (let i = 0; i < 5; i++) { const candidate = join4(dir, "package.json"); try { @@ -367,9 +358,9 @@ function getInstalledVersion() { } return null; } -async function getLatestVersion() { +async function getLatestVersion(timeoutMs = 3e3) { try { - const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(VERSION_CHECK_TIMEOUT) }); + const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(timeoutMs) }); if (!res.ok) return null; const pkg = await 
res.json(); @@ -384,6 +375,29 @@ function isNewer(latest, current) { const [ca, cb, cc] = parse(current); return la > ca || la === ca && lb > cb || la === ca && lb === cb && lc > cc; } + +// dist/src/utils/wiki-log.js +import { mkdirSync as mkdirSync2, appendFileSync as appendFileSync2 } from "node:fs"; +import { join as join5 } from "node:path"; +function makeWikiLogger(hooksDir, filename = "deeplake-wiki.log") { + const path = join5(hooksDir, filename); + return { + path, + log(msg) { + try { + mkdirSync2(hooksDir, { recursive: true }); + appendFileSync2(path, `[${utcTimestamp()}] ${msg} +`); + } catch { + } + } + }; +} + +// dist/src/hooks/codex/session-start-setup.js +var log3 = (msg) => log("codex-session-setup", msg); +var __bundleDir = dirname2(fileURLToPath(import.meta.url)); +var { log: wikiLog } = makeWikiLogger(join6(homedir4(), ".codex", "hooks")); async function createPlaceholder(api, table, sessionId, cwd, userName, orgName, workspaceId) { const summaryPath = `/summaries/${userName}/${sessionId}.md`; const existing = await api.query(`SELECT path FROM "${table}" WHERE path = '${sqlStr(summaryPath)}' LIMIT 1`); @@ -444,7 +458,7 @@ async function main() { } const autoupdate = creds.autoupdate !== false; try { - const current = getInstalledVersion(); + const current = getInstalledVersion(__bundleDir, ".codex-plugin"); if (current) { const latest = await getLatestVersion(); if (latest && isNewer(latest, current)) { diff --git a/codex/bundle/session-start.js b/codex/bundle/session-start.js index 83d4530..fe5cfe1 100755 --- a/codex/bundle/session-start.js +++ b/codex/bundle/session-start.js @@ -3,8 +3,7 @@ // dist/src/hooks/codex/session-start.js import { spawn } from "node:child_process"; import { fileURLToPath } from "node:url"; -import { dirname, join as join3 } from "node:path"; -import { readFileSync as readFileSync2 } from "node:fs"; +import { dirname as dirname2, join as join4 } from "node:path"; // dist/src/commands/auth.js import { readFileSync, 
writeFileSync, existsSync, mkdirSync, unlinkSync } from "node:fs"; @@ -53,25 +52,18 @@ function log(tag, msg) { `); } -// dist/src/hooks/codex/session-start.js -var log2 = (msg) => log("codex-session-start", msg); -var __bundleDir = dirname(fileURLToPath(import.meta.url)); -var AUTH_CMD = join3(__bundleDir, "commands", "auth-login.js"); -var context = `DEEPLAKE MEMORY: Persistent memory at ~/.deeplake/memory/ shared across sessions, users, and agents. - -Structure: index.md (start here) \u2192 summaries/*.md \u2192 sessions/*.jsonl (last resort). Do NOT jump straight to JSONL. -Search: grep -r "keyword" ~/.deeplake/memory/ -IMPORTANT: Only use bash commands (cat, ls, grep, echo, jq, head, tail, sed, awk, etc.) to interact with ~/.deeplake/memory/. Do NOT use python, python3, node, curl, or other interpreters \u2014 they are not available in the memory filesystem. -Do NOT spawn subagents to read deeplake memory.`; -function getInstalledVersion() { +// dist/src/utils/version-check.js +import { readFileSync as readFileSync2 } from "node:fs"; +import { dirname, join as join3 } from "node:path"; +function getInstalledVersion(bundleDir, pluginManifestDir) { try { - const pluginJson = join3(__bundleDir, "..", ".codex-plugin", "plugin.json"); + const pluginJson = join3(bundleDir, "..", pluginManifestDir, "plugin.json"); const plugin = JSON.parse(readFileSync2(pluginJson, "utf-8")); if (plugin.version) return plugin.version; } catch { } - let dir = __bundleDir; + let dir = bundleDir; for (let i = 0; i < 5; i++) { const candidate = join3(dir, "package.json"); try { @@ -87,6 +79,17 @@ function getInstalledVersion() { } return null; } + +// dist/src/hooks/codex/session-start.js +var log2 = (msg) => log("codex-session-start", msg); +var __bundleDir = dirname2(fileURLToPath(import.meta.url)); +var AUTH_CMD = join4(__bundleDir, "commands", "auth-login.js"); +var context = `DEEPLAKE MEMORY: Persistent memory at ~/.deeplake/memory/ shared across sessions, users, and agents. 
+ +Structure: index.md (start here) \u2192 summaries/*.md \u2192 sessions/*.jsonl (last resort). Do NOT jump straight to JSONL. +Search: grep -r "keyword" ~/.deeplake/memory/ +IMPORTANT: Only use bash commands (cat, ls, grep, echo, jq, head, tail, sed, awk, etc.) to interact with ~/.deeplake/memory/. Do NOT use python, python3, node, curl, or other interpreters \u2014 they are not available in the memory filesystem. +Do NOT spawn subagents to read deeplake memory.`; async function main() { if (process.env.HIVEMIND_WIKI_WORKER === "1") return; @@ -98,7 +101,7 @@ async function main() { log2(`credentials loaded: org=${creds.orgName ?? creds.orgId}`); } if (creds?.token) { - const setupScript = join3(__bundleDir, "session-start-setup.js"); + const setupScript = join4(__bundleDir, "session-start-setup.js"); const child = spawn("node", [setupScript], { detached: true, stdio: ["pipe", "ignore", "ignore"], @@ -110,7 +113,7 @@ async function main() { log2("spawned async setup process"); } let versionNotice = ""; - const current = getInstalledVersion(); + const current = getInstalledVersion(__bundleDir, ".codex-plugin"); if (current) { versionNotice = ` Hivemind v${current}`; diff --git a/codex/bundle/stop.js b/codex/bundle/stop.js index e5b3c8b..2de7118 100755 --- a/codex/bundle/stop.js +++ b/codex/bundle/stop.js @@ -65,6 +65,9 @@ import { join as join2 } from "node:path"; import { homedir as homedir2 } from "node:os"; var DEBUG = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; var LOG = join2(homedir2(), ".deeplake", "hook-debug.log"); +function utcTimestamp(d = /* @__PURE__ */ new Date()) { + return d.toISOString().replace("T", " ").slice(0, 19) + " UTC"; +} function log(tag, msg) { if (!DEBUG) return; @@ -305,11 +308,32 @@ var DeeplakeApi = class { // dist/src/hooks/codex/spawn-wiki-worker.js import { spawn, execSync } from "node:child_process"; import { fileURLToPath } from "node:url"; -import { dirname, join as join3 } from "node:path"; -import { writeFileSync, mkdirSync, appendFileSync as appendFileSync2 } from "node:fs"; +import { dirname, join as join4 } from "node:path"; +import { writeFileSync, mkdirSync as mkdirSync2 } from "node:fs"; import { homedir as homedir3, tmpdir } from "node:os"; + +// dist/src/utils/wiki-log.js +import { mkdirSync, appendFileSync as appendFileSync2 } from "node:fs"; +import { join as join3 } from "node:path"; +function makeWikiLogger(hooksDir, filename = "deeplake-wiki.log") { + const path = join3(hooksDir, filename); + return { + path, + log(msg) { + try { + mkdirSync(hooksDir, { recursive: true }); + appendFileSync2(path, `[${utcTimestamp()}] ${msg} +`); + } catch { + } + } + }; +} + +// dist/src/hooks/codex/spawn-wiki-worker.js var HOME = homedir3(); -var WIKI_LOG = join3(HOME, ".codex", "hooks", "deeplake-wiki.log"); +var wikiLogger = makeWikiLogger(join4(HOME, ".codex", "hooks")); +var WIKI_LOG = wikiLogger.path; var WIKI_PROMPT_TEMPLATE = `You are building a personal wiki from a coding session. Your goal is to extract every piece of knowledge \u2014 entities, decisions, relationships, and facts \u2014 into a structured, searchable wiki entry. SESSION JSONL path: __JSONL__ @@ -359,14 +383,7 @@ Format: **entity** (type) \u2014 what was done with it, its current state> IMPORTANT: Be exhaustive. Extract EVERY entity, decision, and fact. PRIVACY: Never include absolute filesystem paths in the summary. 
LENGTH LIMIT: Keep the total summary under 4000 characters.`; -function wikiLog(msg) { - try { - mkdirSync(join3(HOME, ".codex", "hooks"), { recursive: true }); - appendFileSync2(WIKI_LOG, `[${(/* @__PURE__ */ new Date()).toISOString().replace("T", " ").slice(0, 19)}] ${msg} -`); - } catch { - } -} +var wikiLog = wikiLogger.log; function findCodexBin() { try { return execSync("which codex 2>/dev/null", { encoding: "utf-8" }).trim(); @@ -377,9 +394,9 @@ function findCodexBin() { function spawnCodexWikiWorker(opts) { const { config, sessionId, cwd, bundleDir, reason } = opts; const projectName = cwd.split("/").pop() || "unknown"; - const tmpDir = join3(tmpdir(), `deeplake-wiki-${sessionId}-${Date.now()}`); - mkdirSync(tmpDir, { recursive: true }); - const configFile = join3(tmpDir, "config.json"); + const tmpDir = join4(tmpdir(), `deeplake-wiki-${sessionId}-${Date.now()}`); + mkdirSync2(tmpDir, { recursive: true }); + const configFile = join4(tmpDir, "config.json"); writeFileSync(configFile, JSON.stringify({ apiUrl: config.apiUrl, token: config.token, @@ -393,11 +410,11 @@ function spawnCodexWikiWorker(opts) { tmpDir, codexBin: findCodexBin(), wikiLog: WIKI_LOG, - hooksDir: join3(HOME, ".codex", "hooks"), + hooksDir: join4(HOME, ".codex", "hooks"), promptTemplate: WIKI_PROMPT_TEMPLATE })); wikiLog(`${reason}: spawning summary worker for ${sessionId}`); - const workerPath = join3(bundleDir, "wiki-worker.js"); + const workerPath = join4(bundleDir, "wiki-worker.js"); spawn("nohup", ["node", workerPath, configFile], { detached: true, stdio: ["ignore", "ignore", "ignore"] @@ -409,17 +426,17 @@ function bundleDirFromImportMeta(importMetaUrl) { } // dist/src/hooks/summary-state.js -import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, writeSync, mkdirSync as mkdirSync2, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; +import { readFileSync as readFileSync2, writeFileSync as writeFileSync2, writeSync, mkdirSync as 
mkdirSync3, renameSync, existsSync as existsSync2, unlinkSync, openSync, closeSync } from "node:fs"; import { homedir as homedir4 } from "node:os"; -import { join as join4 } from "node:path"; +import { join as join5 } from "node:path"; var dlog = (msg) => log("summary-state", msg); -var STATE_DIR = join4(homedir4(), ".claude", "hooks", "summary-state"); +var STATE_DIR = join5(homedir4(), ".claude", "hooks", "summary-state"); var YIELD_BUF = new Int32Array(new SharedArrayBuffer(4)); function lockPath(sessionId) { - return join4(STATE_DIR, `${sessionId}.lock`); + return join5(STATE_DIR, `${sessionId}.lock`); } function tryAcquireLock(sessionId, maxAgeMs = 10 * 60 * 1e3) { - mkdirSync2(STATE_DIR, { recursive: true }); + mkdirSync3(STATE_DIR, { recursive: true }); const p = lockPath(sessionId); if (existsSync2(p)) { try { @@ -460,12 +477,15 @@ function releaseLock(sessionId) { } } +// dist/src/utils/session-path.js +function buildSessionPath(config, sessionId) { + const workspace = config.workspaceId ?? 
"default"; + return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${workspace}_${sessionId}.jsonl`; +} + // dist/src/hooks/codex/stop.js var log3 = (msg) => log("codex-stop", msg); var CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; -function buildSessionPath(config, sessionId) { - return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${config.workspaceId}_${sessionId}.jsonl`; -} async function main() { if (process.env.HIVEMIND_WIKI_WORKER === "1") return; diff --git a/src/hooks/capture.ts b/src/hooks/capture.ts index 2628bd5..81c8385 100644 --- a/src/hooks/capture.ts +++ b/src/hooks/capture.ts @@ -12,6 +12,7 @@ import { loadConfig, type Config } from "../config.js"; import { DeeplakeApi } from "../deeplake-api.js"; import { sqlStr } from "../utils/sql.js"; import { log as _log } from "../utils/debug.js"; +import { buildSessionPath } from "../utils/session-path.js"; import { bumpTotalCount, loadTriggerConfig, @@ -45,16 +46,6 @@ interface HookInput { const CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; -/** Build the session path matching the CLI convention: - * /sessions//___.jsonl */ -function buildSessionPath(config: { userName: string; orgName: string; workspaceId: string }, sessionId: string): string { - const userName = config.userName; - const orgName = config.orgName; - const workspace = config.workspaceId ?? 
"default"; - - return `/sessions/${userName}/${userName}_${orgName}_${workspace}_${sessionId}.jsonl`; -} - async function main(): Promise { if (!CAPTURE) return; const input = await readStdin(); diff --git a/src/hooks/codex/capture.ts b/src/hooks/codex/capture.ts index 5414617..0c80802 100644 --- a/src/hooks/codex/capture.ts +++ b/src/hooks/codex/capture.ts @@ -17,6 +17,7 @@ import { loadConfig, type Config } from "../../config.js"; import { DeeplakeApi } from "../../deeplake-api.js"; import { sqlStr } from "../../utils/sql.js"; import { log as _log } from "../../utils/debug.js"; +import { buildSessionPath } from "../../utils/session-path.js"; import { bumpTotalCount, loadTriggerConfig, @@ -45,10 +46,6 @@ interface CodexHookInput { const CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; -function buildSessionPath(config: { userName: string; orgName: string; workspaceId: string }, sessionId: string): string { - return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${config.workspaceId}_${sessionId}.jsonl`; -} - async function main(): Promise { if (!CAPTURE) return; const input = await readStdin(); diff --git a/src/hooks/codex/session-start-setup.ts b/src/hooks/codex/session-start-setup.ts index 395ee97..8dfb984 100644 --- a/src/hooks/codex/session-start-setup.ts +++ b/src/hooks/codex/session-start-setup.ts @@ -8,7 +8,6 @@ import { fileURLToPath } from "node:url"; import { dirname, join } from "node:path"; -import { mkdirSync, appendFileSync, readFileSync } from "node:fs"; import { execSync } from "node:child_process"; import { homedir } from "node:os"; import { loadCredentials, saveCredentials } from "../../commands/auth.js"; @@ -17,60 +16,12 @@ import { DeeplakeApi } from "../../deeplake-api.js"; import { sqlStr } from "../../utils/sql.js"; import { readStdin } from "../../utils/stdin.js"; import { log as _log } from "../../utils/debug.js"; +import { getInstalledVersion, getLatestVersion, isNewer } from "../../utils/version-check.js"; +import 
{ makeWikiLogger } from "../../utils/wiki-log.js"; const log = (msg: string) => _log("codex-session-setup", msg); const __bundleDir = dirname(fileURLToPath(import.meta.url)); - -const GITHUB_RAW_PKG = "https://raw.githubusercontent.com/activeloopai/hivemind/main/package.json"; -const VERSION_CHECK_TIMEOUT = 3000; - -const HOME = homedir(); -const WIKI_LOG = join(HOME, ".codex", "hooks", "deeplake-wiki.log"); - -function wikiLog(msg: string): void { - try { - mkdirSync(join(HOME, ".codex", "hooks"), { recursive: true }); - appendFileSync(WIKI_LOG, `[${new Date().toISOString().replace("T", " ").slice(0, 19)}] ${msg}\n`); - } catch { /* ignore */ } -} - -function getInstalledVersion(): string | null { - try { - const pluginJson = join(__bundleDir, "..", ".codex-plugin", "plugin.json"); - const plugin = JSON.parse(readFileSync(pluginJson, "utf-8")); - if (plugin.version) return plugin.version; - } catch { /* fall through */ } - let dir = __bundleDir; - for (let i = 0; i < 5; i++) { - const candidate = join(dir, "package.json"); - try { - const pkg = JSON.parse(readFileSync(candidate, "utf-8")); - if ((pkg.name === "hivemind" || pkg.name === "hivemind-codex") && pkg.version) return pkg.version; - } catch { /* not here, keep looking */ } - const parent = dirname(dir); - if (parent === dir) break; - dir = parent; - } - return null; -} - -async function getLatestVersion(): Promise { - try { - const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(VERSION_CHECK_TIMEOUT) }); - if (!res.ok) return null; - const pkg = await res.json(); - return pkg.version ?? 
null; - } catch { - return null; - } -} - -function isNewer(latest: string, current: string): boolean { - const parse = (v: string) => v.split(".").map(Number); - const [la, lb, lc] = parse(latest); - const [ca, cb, cc] = parse(current); - return la > ca || (la === ca && lb > cb) || (la === ca && lb === cb && lc > cc); -} +const { log: wikiLog } = makeWikiLogger(join(homedir(), ".codex", "hooks")); /** Create a placeholder summary via direct SQL INSERT. */ async function createPlaceholder(api: DeeplakeApi, table: string, sessionId: string, cwd: string, userName: string, orgName: string, workspaceId: string): Promise { @@ -155,7 +106,7 @@ async function main(): Promise { // Version check + auto-update const autoupdate = creds.autoupdate !== false; try { - const current = getInstalledVersion(); + const current = getInstalledVersion(__bundleDir, ".codex-plugin"); if (current) { const latest = await getLatestVersion(); if (latest && isNewer(latest, current)) { diff --git a/src/hooks/codex/session-start.ts b/src/hooks/codex/session-start.ts index be72587..81d25e4 100644 --- a/src/hooks/codex/session-start.ts +++ b/src/hooks/codex/session-start.ts @@ -13,10 +13,10 @@ import { spawn } from "node:child_process"; import { fileURLToPath } from "node:url"; import { dirname, join } from "node:path"; -import { readFileSync } from "node:fs"; import { loadCredentials } from "../../commands/auth.js"; import { readStdin } from "../../utils/stdin.js"; import { log as _log } from "../../utils/debug.js"; +import { getInstalledVersion } from "../../utils/version-check.js"; const log = (msg: string) => _log("codex-session-start", msg); const __bundleDir = dirname(fileURLToPath(import.meta.url)); @@ -29,26 +29,6 @@ Search: grep -r "keyword" ~/.deeplake/memory/ IMPORTANT: Only use bash commands (cat, ls, grep, echo, jq, head, tail, sed, awk, etc.) to interact with ~/.deeplake/memory/. 
Do NOT use python, python3, node, curl, or other interpreters — they are not available in the memory filesystem. Do NOT spawn subagents to read deeplake memory.`; -function getInstalledVersion(): string | null { - try { - const pluginJson = join(__bundleDir, "..", ".codex-plugin", "plugin.json"); - const plugin = JSON.parse(readFileSync(pluginJson, "utf-8")); - if (plugin.version) return plugin.version; - } catch { /* fall through */ } - let dir = __bundleDir; - for (let i = 0; i < 5; i++) { - const candidate = join(dir, "package.json"); - try { - const pkg = JSON.parse(readFileSync(candidate, "utf-8")); - if ((pkg.name === "hivemind" || pkg.name === "hivemind-codex") && pkg.version) return pkg.version; - } catch { /* not here, keep looking */ } - const parent = dirname(dir); - if (parent === dir) break; - dir = parent; - } - return null; -} - interface CodexSessionStartInput { session_id: string; transcript_path?: string | null; @@ -88,7 +68,7 @@ async function main(): Promise { } let versionNotice = ""; - const current = getInstalledVersion(); + const current = getInstalledVersion(__bundleDir, ".codex-plugin"); if (current) { versionNotice = `\nHivemind v${current}`; } diff --git a/src/hooks/codex/spawn-wiki-worker.ts b/src/hooks/codex/spawn-wiki-worker.ts index d7c57e1..06bc89e 100644 --- a/src/hooks/codex/spawn-wiki-worker.ts +++ b/src/hooks/codex/spawn-wiki-worker.ts @@ -6,12 +6,14 @@ import { spawn, execSync } from "node:child_process"; import { fileURLToPath } from "node:url"; import { dirname, join } from "node:path"; -import { writeFileSync, mkdirSync, appendFileSync } from "node:fs"; +import { writeFileSync, mkdirSync } from "node:fs"; import { homedir, tmpdir } from "node:os"; import type { Config } from "../../config.js"; +import { makeWikiLogger } from "../../utils/wiki-log.js"; const HOME = homedir(); -export const WIKI_LOG = join(HOME, ".codex", "hooks", "deeplake-wiki.log"); +const wikiLogger = makeWikiLogger(join(HOME, ".codex", "hooks")); +export 
const WIKI_LOG = wikiLogger.path; export const WIKI_PROMPT_TEMPLATE = `You are building a personal wiki from a coding session. Your goal is to extract every piece of knowledge — entities, decisions, relationships, and facts — into a structured, searchable wiki entry. @@ -63,12 +65,7 @@ IMPORTANT: Be exhaustive. Extract EVERY entity, decision, and fact. PRIVACY: Never include absolute filesystem paths in the summary. LENGTH LIMIT: Keep the total summary under 4000 characters.`; -export function wikiLog(msg: string): void { - try { - mkdirSync(join(HOME, ".codex", "hooks"), { recursive: true }); - appendFileSync(WIKI_LOG, `[${new Date().toISOString().replace("T", " ").slice(0, 19)}] ${msg}\n`); - } catch { /* ignore */ } -} +export const wikiLog = wikiLogger.log; export function findCodexBin(): string { try { diff --git a/src/hooks/codex/stop.ts b/src/hooks/codex/stop.ts index 8c2820e..39eb330 100644 --- a/src/hooks/codex/stop.ts +++ b/src/hooks/codex/stop.ts @@ -19,6 +19,7 @@ import { sqlStr } from "../../utils/sql.js"; import { log as _log } from "../../utils/debug.js"; import { bundleDirFromImportMeta, spawnCodexWikiWorker, wikiLog } from "./spawn-wiki-worker.js"; import { tryAcquireLock, releaseLock } from "../summary-state.js"; +import { buildSessionPath } from "../../utils/session-path.js"; const log = (msg: string) => _log("codex-stop", msg); @@ -32,10 +33,6 @@ interface CodexStopInput { const CAPTURE = process.env.HIVEMIND_CAPTURE !== "false"; -function buildSessionPath(config: { userName: string; orgName: string; workspaceId: string }, sessionId: string): string { - return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${config.workspaceId}_${sessionId}.jsonl`; -} - async function main(): Promise { if (process.env.HIVEMIND_WIKI_WORKER === "1") return; diff --git a/src/hooks/session-start-setup.ts b/src/hooks/session-start-setup.ts index 7065fde..f78ceb0 100644 --- a/src/hooks/session-start-setup.ts +++ b/src/hooks/session-start-setup.ts 
@@ -8,68 +8,19 @@ import { fileURLToPath } from "node:url"; import { dirname, join } from "node:path"; -import { mkdirSync, appendFileSync, readFileSync } from "node:fs"; import { execSync } from "node:child_process"; import { homedir } from "node:os"; import { loadCredentials, saveCredentials } from "../commands/auth.js"; import { loadConfig } from "../config.js"; import { DeeplakeApi } from "../deeplake-api.js"; import { readStdin } from "../utils/stdin.js"; -import { log as _log, utcTimestamp } from "../utils/debug.js"; +import { log as _log } from "../utils/debug.js"; +import { getInstalledVersion, getLatestVersion, isNewer } from "../utils/version-check.js"; +import { makeWikiLogger } from "../utils/wiki-log.js"; const log = (msg: string) => _log("session-setup", msg); const __bundleDir = dirname(fileURLToPath(import.meta.url)); - -const GITHUB_RAW_PKG = "https://raw.githubusercontent.com/activeloopai/hivemind/main/package.json"; -const VERSION_CHECK_TIMEOUT = 3000; - -const HOME = homedir(); -const WIKI_LOG = join(HOME, ".claude", "hooks", "deeplake-wiki.log"); - -function wikiLog(msg: string): void { - try { - mkdirSync(join(HOME, ".claude", "hooks"), { recursive: true }); - appendFileSync(WIKI_LOG, `[${utcTimestamp()}] ${msg}\n`); - } catch { /* ignore */ } -} - -function getInstalledVersion(): string | null { - try { - const pluginJson = join(__bundleDir, "..", ".claude-plugin", "plugin.json"); - const plugin = JSON.parse(readFileSync(pluginJson, "utf-8")); - if (plugin.version) return plugin.version; - } catch { /* fall through */ } - let dir = __bundleDir; - for (let i = 0; i < 5; i++) { - const candidate = join(dir, "package.json"); - try { - const pkg = JSON.parse(readFileSync(candidate, "utf-8")); - if ((pkg.name === "hivemind" || pkg.name === "hivemind-codex") && pkg.version) return pkg.version; - } catch { /* not here, keep looking */ } - const parent = dirname(dir); - if (parent === dir) break; - dir = parent; - } - return null; -} - -async 
function getLatestVersion(): Promise { - try { - const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(VERSION_CHECK_TIMEOUT) }); - if (!res.ok) return null; - const pkg = await res.json(); - return pkg.version ?? null; - } catch { - return null; - } -} - -function isNewer(latest: string, current: string): boolean { - const parse = (v: string) => v.split(".").map(Number); - const [la, lb, lc] = parse(latest); - const [ca, cb, cc] = parse(current); - return la > ca || (la === ca && lb > cb) || (la === ca && lb === cb && lc > cc); -} +const { log: wikiLog } = makeWikiLogger(join(homedir(), ".claude", "hooks")); interface SessionStartInput { session_id: string; @@ -111,7 +62,7 @@ async function main(): Promise { // Version check + auto-update const autoupdate = creds.autoupdate !== false; try { - const current = getInstalledVersion(); + const current = getInstalledVersion(__bundleDir, ".claude-plugin"); if (current) { const latest = await getLatestVersion(); if (latest && isNewer(latest, current)) { diff --git a/src/hooks/session-start.ts b/src/hooks/session-start.ts index 73c9cf1..60e402b 100644 --- a/src/hooks/session-start.ts +++ b/src/hooks/session-start.ts @@ -8,7 +8,7 @@ import { fileURLToPath } from "node:url"; import { dirname, join } from "node:path"; -import { mkdirSync, appendFileSync, readFileSync, readdirSync, rmSync } from "node:fs"; +import { readdirSync, rmSync } from "node:fs"; import { execSync } from "node:child_process"; import { homedir } from "node:os"; import { loadCredentials, saveCredentials, login } from "../commands/auth.js"; @@ -16,7 +16,9 @@ import { loadConfig } from "../config.js"; import { DeeplakeApi } from "../deeplake-api.js"; import { sqlStr } from "../utils/sql.js"; import { readStdin } from "../utils/stdin.js"; -import { log as _log, utcTimestamp } from "../utils/debug.js"; +import { log as _log } from "../utils/debug.js"; +import { getInstalledVersion, getLatestVersion, isNewer } from "../utils/version-check.js"; 
+import { makeWikiLogger } from "../utils/wiki-log.js"; const log = (msg: string) => _log("session-start", msg); const __bundleDir = dirname(fileURLToPath(import.meta.url)); @@ -53,61 +55,8 @@ LIMITS: Do NOT spawn subagents to read deeplake memory. If a file returns empty Debugging: Set HIVEMIND_DEBUG=1 to enable verbose logging to ~/.deeplake/hook-debug.log`; -const GITHUB_RAW_PKG = "https://raw.githubusercontent.com/activeloopai/hivemind/main/package.json"; -const VERSION_CHECK_TIMEOUT = 3000; // 3s — don't block session start - -function getInstalledVersion(): string | null { - // Try plugin manifest first (works in both cache and marketplace layouts) - try { - const pluginJson = join(__bundleDir, "..", ".claude-plugin", "plugin.json"); - const plugin = JSON.parse(readFileSync(pluginJson, "utf-8")); - if (plugin.version) return plugin.version; - } catch { /* fall through */ } - // Walk up from the bundle directory to find the nearest package.json. - // Depending on install method the layout varies: - // marketplace: /claude-code/bundle/ → package.json is 2 levels up - // cache: /bundle/ → package.json is 1 level up (if present) - let dir = __bundleDir; - for (let i = 0; i < 5; i++) { - const candidate = join(dir, "package.json"); - try { - const pkg = JSON.parse(readFileSync(candidate, "utf-8")); - if ((pkg.name === "hivemind" || pkg.name === "hivemind-codex") && pkg.version) return pkg.version; - } catch { /* not here, keep looking */ } - const parent = dirname(dir); - if (parent === dir) break; // reached filesystem root - dir = parent; - } - return null; -} - -async function getLatestVersion(): Promise { - try { - const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(VERSION_CHECK_TIMEOUT) }); - if (!res.ok) return null; - const pkg = await res.json(); - return pkg.version ?? 
null; - } catch { - return null; - } -} - -function isNewer(latest: string, current: string): boolean { - const parse = (v: string) => v.split(".").map(Number); - const [la, lb, lc] = parse(latest); - const [ca, cb, cc] = parse(current); - return la > ca || (la === ca && lb > cb) || (la === ca && lb === cb && lc > cc); -} - const HOME = homedir(); -const WIKI_LOG = join(HOME, ".claude", "hooks", "deeplake-wiki.log"); - -function wikiLog(msg: string): void { - try { - mkdirSync(join(HOME, ".claude", "hooks"), { recursive: true }); - appendFileSync(WIKI_LOG, `[${utcTimestamp()}] ${msg}\n`); - } catch { /* ignore */ } -} +const { log: wikiLog } = makeWikiLogger(join(HOME, ".claude", "hooks")); /** Create a placeholder summary via direct SQL INSERT (no DeeplakeFs bootstrap needed). */ async function createPlaceholder(api: DeeplakeApi, table: string, sessionId: string, cwd: string, userName: string, orgName: string, workspaceId: string): Promise { @@ -203,7 +152,7 @@ async function main(): Promise { const autoupdate = creds?.autoupdate !== false; // default: true let updateNotice = ""; try { - const current = getInstalledVersion(); + const current = getInstalledVersion(__bundleDir, ".claude-plugin"); if (current) { const latest = await getLatestVersion(); if (latest && isNewer(latest, current)) { diff --git a/src/hooks/spawn-wiki-worker.ts b/src/hooks/spawn-wiki-worker.ts index a2440b6..b870bb4 100644 --- a/src/hooks/spawn-wiki-worker.ts +++ b/src/hooks/spawn-wiki-worker.ts @@ -6,13 +6,14 @@ import { spawn, execSync } from "node:child_process"; import { fileURLToPath } from "node:url"; import { dirname, join } from "node:path"; -import { writeFileSync, mkdirSync, appendFileSync } from "node:fs"; +import { writeFileSync, mkdirSync } from "node:fs"; import { homedir, tmpdir } from "node:os"; import type { Config } from "../config.js"; -import { utcTimestamp } from "../utils/debug.js"; +import { makeWikiLogger } from "../utils/wiki-log.js"; const HOME = homedir(); -export 
const WIKI_LOG = join(HOME, ".claude", "hooks", "deeplake-wiki.log"); +const wikiLogger = makeWikiLogger(join(HOME, ".claude", "hooks")); +export const WIKI_LOG = wikiLogger.path; export const WIKI_PROMPT_TEMPLATE = `You are building a personal wiki from a coding session. Your goal is to extract every piece of knowledge — entities, decisions, relationships, and facts — into a structured, searchable wiki entry. Think of this as building a knowledge graph, not writing a summary. @@ -67,12 +68,7 @@ PRIVACY: Never include absolute filesystem paths (e.g. /home/user/..., /Users/.. LENGTH LIMIT: Keep the total summary under 4000 characters. Be dense and concise — prioritize facts over prose. If a session is short, the summary should be short too.`; -export function wikiLog(msg: string): void { - try { - mkdirSync(join(HOME, ".claude", "hooks"), { recursive: true }); - appendFileSync(WIKI_LOG, `[${utcTimestamp()}] ${msg}\n`); - } catch { /* ignore */ } -} +export const wikiLog = wikiLogger.log; export function findClaudeBin(): string { try { diff --git a/src/utils/session-path.ts b/src/utils/session-path.ts new file mode 100644 index 0000000..e4b8e5b --- /dev/null +++ b/src/utils/session-path.ts @@ -0,0 +1,13 @@ +/** + * Canonical session JSONL path. Used by every capture hook (CC + Codex) + * and by the placeholder / summary paths in session-start. Keeping it + * in one place prevents the 4-tuple `{userName, orgName, workspaceId, + * sessionId}` from ever being re-assembled in the wrong order. + */ +export function buildSessionPath( + config: { userName: string; orgName: string; workspaceId: string }, + sessionId: string, +): string { + const workspace = config.workspaceId ?? 
"default"; + return `/sessions/${config.userName}/${config.userName}_${config.orgName}_${workspace}_${sessionId}.jsonl`; +} diff --git a/src/utils/version-check.ts b/src/utils/version-check.ts new file mode 100644 index 0000000..e8e14cb --- /dev/null +++ b/src/utils/version-check.ts @@ -0,0 +1,67 @@ +/** + * Shared install-version / latest-version / version-compare helpers. + * Used by both the CC and Codex session-start hooks. Each side differs + * only in the path of its plugin manifest: + * - claude-code → /../.claude-plugin/plugin.json + * - codex → /../.codex-plugin/plugin.json + * Callers pass the plugin-manifest name explicitly. + */ + +import { readFileSync } from "node:fs"; +import { dirname, join } from "node:path"; + +const GITHUB_RAW_PKG = "https://raw.githubusercontent.com/activeloopai/hivemind/main/package.json"; + +/** + * Read the installed plugin version. + * + * Tries `/../plugin.json` first (both the + * cache layout and the marketplace layout pin the version there), then + * walks up from the bundle dir looking for a `package.json` whose name + * is `hivemind` or `hivemind-codex`. Returns null if nothing is found + * — callers treat that as "skip the update check". 
+ */ +export function getInstalledVersion(bundleDir: string, pluginManifestDir: string): string | null { + try { + const pluginJson = join(bundleDir, "..", pluginManifestDir, "plugin.json"); + const plugin = JSON.parse(readFileSync(pluginJson, "utf-8")); + if (plugin.version) return plugin.version; + } catch { /* fall through */ } + let dir = bundleDir; + for (let i = 0; i < 5; i++) { + const candidate = join(dir, "package.json"); + try { + const pkg = JSON.parse(readFileSync(candidate, "utf-8")); + if ((pkg.name === "hivemind" || pkg.name === "hivemind-codex") && pkg.version) return pkg.version; + } catch { /* not here, keep looking */ } + const parent = dirname(dir); + if (parent === dir) break; + dir = parent; + } + return null; +} + +/** + * Fetch the latest version from GitHub (main branch package.json). + * Returns null on any failure — session-start hooks must never block + * on GitHub being reachable, and their callers treat null as "no + * update available". + */ +export async function getLatestVersion(timeoutMs = 3000): Promise<string | null> { + try { + const res = await fetch(GITHUB_RAW_PKG, { signal: AbortSignal.timeout(timeoutMs) }); + if (!res.ok) return null; + const pkg = await res.json(); + return pkg.version ?? null; + } catch { + return null; + } +} + +/** Strict semantic "latest is greater than current" for dotted x.y.z strings. */ +export function isNewer(latest: string, current: string): boolean { + const parse = (v: string) => v.split(".").map(Number); + const [la, lb, lc] = parse(latest); + const [ca, cb, cc] = parse(current); + return la > ca || (la === ca && lb > cb) || (la === ca && lb === cb && lc > cc); +} diff --git a/src/utils/wiki-log.ts b/src/utils/wiki-log.ts new file mode 100644 index 0000000..a154afb --- /dev/null +++ b/src/utils/wiki-log.ts @@ -0,0 +1,32 @@ +/** + * wikiLog writer factory. Produces an unconditional append-line logger + * that targets a user-visible wiki-log file. 
Each plugin variant has + * its own path (CC: ~/.claude/hooks/..., Codex: ~/.codex/hooks/...), + * so the caller constructs the logger once by passing HOOKS_DIR. + * + * This is the *user-visible* log — entries like "SessionEnd: + * triggering summary for " land here regardless of HIVEMIND_DEBUG. + * For debug-gated diagnostics use `_log` from src/utils/debug.ts. + */ + +import { mkdirSync, appendFileSync } from "node:fs"; +import { join } from "node:path"; +import { utcTimestamp } from "./debug.js"; + +export interface WikiLogger { + log: (msg: string) => void; + path: string; +} + +export function makeWikiLogger(hooksDir: string, filename = "deeplake-wiki.log"): WikiLogger { + const path = join(hooksDir, filename); + return { + path, + log(msg: string): void { + try { + mkdirSync(hooksDir, { recursive: true }); + appendFileSync(path, `[${utcTimestamp()}] ${msg}\n`); + } catch { /* ignore — a log failure must never crash the hook */ } + }, + }; +} From 0e8df064cc42e0c82e64ce5e97599ef701193aab Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 21:22:49 +0000 Subject: [PATCH 26/39] ci: duplication check via jscpd (regression guard) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a jscpd step to the CI pipeline that fails if code duplication exceeds 7% of the src/ tree. 7% is the current baseline — the number exists to catch a new clone, not to force a rewrite of the residual duplication in hooks/capture.ts vs hooks/codex/capture.ts (those two handle different event shapes and unifying them would be a separate refactor). 
- npm run dup runs jscpd on src/ with min 10 lines / 60 tokens - CI uploads jscpd-report/*.md on every run so reviewers can see the exact clone locations when the check fails - .gitignore excludes the report dir - package.json's ci script now chains typecheck → dup → test The markdown report pinpoints every clone with file:line ranges, so when a future PR bumps duplication above the threshold, the reviewer immediately sees which block was copy-pasted where. --- .github/workflows/ci.yml | 15 + .gitignore | 1 + .jscpd.json | 21 + package-lock.json | 1406 +++++++++++++++++++++++++++++++++++++- package.json | 4 +- 5 files changed, 1429 insertions(+), 18 deletions(-) create mode 100644 .jscpd.json diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 665f2a9..ece166b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,6 +34,21 @@ jobs: - name: Typecheck run: npm run typecheck + - name: Duplication check (jscpd) + # Threshold 7% is the current baseline (see .jscpd.json). The job + # fails if a future change pushes duplication above it, so the + # number is a regression guard — reviewers can see the exact + # clones in the markdown report uploaded below. + run: npm run dup + + - name: Upload jscpd report + if: always() + uses: actions/upload-artifact@v4 + with: + name: jscpd-report + path: jscpd-report/ + if-no-files-found: ignore + - name: Run tests with coverage # Per-file 80% thresholds for PR #60 files are declared in # vitest.config.ts under `coverage.thresholds`. 
Vitest exits non-zero diff --git a/.gitignore b/.gitignore index 4f538ba..b952b68 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ tmp/ .env .env.* coverage/ +jscpd-report/ bench/ .claude/ CLAUDE.md diff --git a/.jscpd.json b/.jscpd.json new file mode 100644 index 0000000..842983c --- /dev/null +++ b/.jscpd.json @@ -0,0 +1,21 @@ +{ + "threshold": 7, + "reporters": ["console", "markdown"], + "output": "./jscpd-report", + "ignore": [ + "**/node_modules/**", + "**/dist/**", + "**/bundle/**", + "**/coverage/**", + "**/*.test.ts", + "**/tests/**", + "**/fixtures/**", + "**/claude-code/.claude-plugin/**", + "**/codex/.codex-plugin/**" + ], + "absolute": false, + "gitignore": true, + "format": ["typescript"], + "minLines": 10, + "minTokens": 60 +} diff --git a/package-lock.json b/package-lock.json index 7ec599d..c3c6cba 100644 --- a/package-lock.json +++ b/package-lock.json @@ -21,6 +21,7 @@ "@vitest/coverage-v8": "^4.1.3", "esbuild": "^0.28.0", "husky": "^9.1.7", + "jscpd": "^4.0.9", "lint-staged": "^16.4.0", "tsx": "^4.7.0", "typescript": "^6.0.0", @@ -1057,6 +1058,17 @@ "url": "https://github.com/sponsors/Borewit" } }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, "node_modules/@emnapi/core": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.1.tgz", @@ -1080,6 +1092,16 @@ "tslib": "^2.4.0" } }, + "node_modules/@emnapi/runtime": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz", + "integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + 
}, "node_modules/@esbuild/aix-ppc64": { "version": "0.28.0", "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.28.0.tgz", @@ -2058,6 +2080,71 @@ "@jridgewell/sourcemap-codec": "^1.4.14" } }, + "node_modules/@jscpd/badge-reporter": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@jscpd/badge-reporter/-/badge-reporter-4.0.5.tgz", + "integrity": "sha512-SLVhP00R9lkQ//Ivaanfm7k0L9sewpBven670kk1uGec2SWUOa7MVQcuad/TV59KEZ73UIC1lXvi6O9hAnbpUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "badgen": "^3.2.3", + "colors": "^1.4.0", + "fs-extra": "^11.2.0" + } + }, + "node_modules/@jscpd/core": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@jscpd/core/-/core-4.0.5.tgz", + "integrity": "sha512-Udvym21nWzxjYRVXwwpYNBqZ6b50QV2zHN3fFNzOPPg4cfQVYOZerILB7xNDUsXHC1PCr/N52Tq3q7AElvjWWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eventemitter3": "^5.0.1" + } + }, + "node_modules/@jscpd/finder": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@jscpd/finder/-/finder-4.0.5.tgz", + "integrity": "sha512-/2VkRoVrrfya+51sitZo5I9MdwsRaPKB8X3L3khAYoHFXk4L/mUuG81RmGazDHjUIGg22ItlkQtwzorNZ2+aPw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jscpd/core": "4.0.5", + "@jscpd/tokenizer": "4.0.5", + "blamer": "^1.0.6", + "bytes": "^3.1.2", + "cli-table3": "^0.6.5", + "colors": "^1.4.0", + "fast-glob": "^3.3.2", + "fs-extra": "^11.2.0", + "markdown-table": "^2.0.0", + "pug": "^3.0.3" + } + }, + "node_modules/@jscpd/html-reporter": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@jscpd/html-reporter/-/html-reporter-4.0.5.tgz", + "integrity": "sha512-drK2J8KyPIW9wvaElSIobZFp4dBO9GA++JW4gx3oihvLdDSp8qSo/CNqH47Dw0XkjQTxND3j/+Wz5JWvYRBgFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "colors": "1.4.0", + "fs-extra": "^11.2.0", + "pug": "^3.0.3" + } + }, + "node_modules/@jscpd/tokenizer": { + "version": "4.0.5", + "resolved": 
"https://registry.npmjs.org/@jscpd/tokenizer/-/tokenizer-4.0.5.tgz", + "integrity": "sha512-WzRujQtN5WedxZVDKuoanxmKAFrxcLrHpcA6kaM4z8AhGtWXZ325yseqgL5TZ8OK7Auwu7kQLlqhfk05fGYG7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jscpd/core": "4.0.5", + "reprism": "^0.0.11", + "spark-md5": "^3.0.2" + } + }, "node_modules/@mixmark-io/domino": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/@mixmark-io/domino/-/domino-2.2.0.tgz", @@ -2098,6 +2185,44 @@ "@emnapi/runtime": "^1.7.1" } }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/@oxc-project/types": { "version": "0.123.0", "resolved": "https://registry.npmjs.org/@oxc-project/types/-/types-0.123.0.tgz", @@ -3223,11 +3348,17 @@ "integrity": "sha512-tO4ZIRKNC+MDWV4qKVZe3Ql/woTnmHDr5JD8UI5hn2pwBrHEwOEMZK7WlNb5RKB6EoJ02gwmQS9OrjuFnZYdpg==", "dev": true, "license": "MIT", - "peer": true, 
"dependencies": { "undici-types": "~7.18.0" } }, + "node_modules/@types/sarif": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@types/sarif/-/sarif-2.1.7.tgz", + "integrity": "sha512-kRz0VEkJqWLf1LLVN4pT1cg1Z9wAuvI6L97V3m2f5B76Tg8d413ddvLBPTEHAZJlnn4XSvu0FkZtViCQGVyrXQ==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/yargs-parser": { "version": "21.0.3", "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", @@ -3241,7 +3372,6 @@ "integrity": "sha512-/MBdrkA8t6hbdCWFKs09dPik774xvs4Z6L4bycdCxYNLHM8oZuRyosumQMG19LUlBsB6GeVpL1q4kFFazvyKGA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@bcoe/v8-coverage": "^1.0.2", "@vitest/utils": "4.1.3", @@ -3380,6 +3510,19 @@ "url": "https://opencollective.com/vitest" } }, + "node_modules/acorn": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/amdefine": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/amdefine/-/amdefine-1.0.1.tgz", @@ -3431,6 +3574,20 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/asap": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", + "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==", + "dev": true, + "license": "MIT" + }, + "node_modules/assert-never": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/assert-never/-/assert-never-1.4.0.tgz", + "integrity": "sha512-5oJg84os6NMQNl27T9LnZkvvqzvAnHu03ShCnoj6bsJwS7L8AO4lf+C/XjK/nvzEqQB744moC6V128RucQd1jA==", + "dev": true, + "license": "MIT" + }, "node_modules/assertion-error": { "version": "2.0.1", "resolved": 
"https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", @@ -3453,6 +3610,26 @@ "js-tokens": "^10.0.0" } }, + "node_modules/babel-walk": { + "version": "3.0.0-canary-5", + "resolved": "https://registry.npmjs.org/babel-walk/-/babel-walk-3.0.0-canary-5.tgz", + "integrity": "sha512-GAwkz0AihzY5bkwIY5QDR+LvsRQgB/B+1foMPvi0FZPMl5fjD7ICiznUiBdLYMH1QYe6vqu4gWYytZOccLouFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.9.6" + }, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/badgen": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/badgen/-/badgen-3.2.3.tgz", + "integrity": "sha512-svDuwkc63E/z0ky3drpUppB83s/nlgDciH9m+STwwQoWyq7yCgew1qEfJ+9axkKdNq7MskByptWUN9j1PGMwFA==", + "dev": true, + "license": "MIT" + }, "node_modules/balanced-match": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", @@ -3495,6 +3672,20 @@ "readable-stream": "^3.4.0" } }, + "node_modules/blamer": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/blamer/-/blamer-1.0.7.tgz", + "integrity": "sha512-GbBStl/EVlSWkiJQBZps3H1iARBrC7vt++Jb/TTmCNu/jZ04VW7tSN1nScbFXBUy1AN+jzeL7Zep9sbQxLhXKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^4.0.0", + "which": "^2.0.2" + }, + "engines": { + "node": ">=8.9" + } + }, "node_modules/bowser": { "version": "2.14.1", "resolved": "https://registry.npmjs.org/bowser/-/bowser-2.14.1.tgz", @@ -3514,6 +3705,19 @@ "node": "18 || 20 || >=22" } }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/buffer": { "version": "5.7.1", "resolved": 
"https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", @@ -3539,6 +3743,47 @@ "ieee754": "^1.1.13" } }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/chai": { "version": "6.2.2", "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", @@ -3549,6 +3794,16 @@ "node": ">=18" } }, + "node_modules/character-parser": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/character-parser/-/character-parser-2.2.0.tgz", + "integrity": "sha512-+UqJQjFEFaTAs3bNsF2j2kEN1baG/zghZbdqoYEDxGZtJo9LBzl1A+m0D4n3qKx8N2FNv8/Xp6yV9mQmBuptaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-regex": "^1.0.3" + } + }, "node_modules/chownr": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", @@ -3572,6 +3827,77 @@ "url": 
"https://github.com/sponsors/sindresorhus" } }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/cli-table3/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/cli-table3/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/cli-truncate": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-5.2.0.tgz", @@ -3596,6 +3922,16 @@ "dev": true, "license": "MIT" }, + "node_modules/colors": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", + "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.1.90" + } + }, "node_modules/commander": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/commander/-/commander-2.8.1.tgz", @@ -3621,6 +3957,17 @@ "compressjs": "bin/compressjs" } }, + "node_modules/constantinople": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/constantinople/-/constantinople-4.0.1.tgz", + "integrity": "sha512-vCrqcSIq4//Gx74TXXCGnHpulY1dskqLTFGDmhrGxzeXL8lF8kvXv6mpNWlJj1uD4DW23D4ljAqbY4RRaaUZIw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.6.0", + "@babel/types": "^7.6.1" + } + }, "node_modules/convert-source-map": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", @@ -3628,6 +3975,21 @@ "dev": true, "license": "MIT" }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + 
"shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/debug": { "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", @@ -3705,6 +4067,28 @@ "node": ">=0.3.1" } }, + "node_modules/doctypes": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/doctypes/-/doctypes-1.1.0.tgz", + "integrity": "sha512-LLBi6pEqS6Do3EKQ3J0NqHWV5hhb78Pi8vvESYwyOy2c31ZEZVdtitdzsQsKb7878PEERhzUk0ftqGhG6Mz+pQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/emoji-regex": { "version": "10.6.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", @@ -3716,8 +4100,8 @@ "version": "1.4.5", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "devOptional": true, "license": "MIT", - "optional": true, "dependencies": { "once": "^1.4.0" } @@ -3735,6 +4119,26 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": 
"sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/es-module-lexer": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-2.0.0.tgz", @@ -3742,6 +4146,19 @@ "dev": true, "license": "MIT" }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/esbuild": { "version": "0.28.0", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.28.0.tgz", @@ -3749,7 +4166,6 @@ "dev": true, "hasInstallScript": true, "license": "MIT", - "peer": true, "bin": { "esbuild": "bin/esbuild" }, @@ -3802,6 +4218,53 @@ "dev": true, "license": "MIT" }, + "node_modules/execa": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", + "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + "human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/execa/node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/execa/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, "node_modules/expand-template": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", @@ -3822,8 +4285,25 @@ "node": ">=12.0.0" } }, - "node_modules/fast-xml-builder": { - "version": "1.1.4", + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-xml-builder": { + "version": "1.1.4", "resolved": "https://registry.npmjs.org/fast-xml-builder/-/fast-xml-builder-1.1.4.tgz", "integrity": "sha512-f2jhpN4Eccy0/Uz9csxh3Nu6q4ErKxf0XIsasomfOihuSUa3/xw6w8dnOtCDgEItQFJG8KyXPzQXzcODDrrbOg==", "funding": [ @@ -3857,6 +4337,16 @@ "fxparser": "src/cli/cli.js" } }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, "node_modules/fdir": { "version": "6.5.0", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", @@ -3893,6 +4383,19 @@ "url": 
"https://github.com/sindresorhus/file-type?sponsor=1" } }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", @@ -3900,6 +4403,21 @@ "license": "MIT", "optional": true }, + "node_modules/fs-extra": { + "version": "11.3.4", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.4.tgz", + "integrity": "sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -3915,6 +4433,16 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/get-east-asian-width": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz", @@ -3928,6 +4456,61 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": 
"sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/get-tsconfig": { "version": "4.13.7", "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.7.tgz", @@ -3948,6 +4531,39 @@ "license": "MIT", "optional": true }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/gopd": { + "version": 
"1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, "node_modules/graceful-readlink": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/graceful-readlink/-/graceful-readlink-1.0.1.tgz", @@ -3964,6 +4580,48 @@ "node": ">=8" } }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.3.tgz", + "integrity": "sha512-ej4AhfhfL2Q2zpMmLo7U1Uv9+PyhIZpgQLGT1F9miIGmiCJIoCgSmczFdrc97mWT4kVY72KA+WnnhJ5pghSvSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + 
"node": ">= 0.4" + } + }, "node_modules/html-escaper": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", @@ -3971,6 +4629,16 @@ "dev": true, "license": "MIT" }, + "node_modules/human-signals": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", + "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8.12.0" + } + }, "node_modules/husky": { "version": "9.1.7", "resolved": "https://registry.npmjs.org/husky/-/husky-9.1.7.tgz", @@ -4023,6 +4691,43 @@ "node": "^20.17.0 || >=22.9.0" } }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-expression": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-expression/-/is-expression-4.0.0.tgz", + "integrity": "sha512-zMIXX63sxzG3XrkHkrAPvm/OVZVSCPNkwMHU8oTX7/U3AL78I0QXCEICXUM13BIa8TYGZ68PiTKfQz3yaTNr4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^7.1.1", + "object-assign": "^4.1.1" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/is-fullwidth-code-point": { "version": "5.1.0", "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", @@ -4039,6 +4744,75 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-promise": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", + "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, "node_modules/istanbul-lib-coverage": { "version": "3.2.2", "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", @@ -4078,6 +4852,13 @@ "node": ">=8" } }, + "node_modules/js-stringify": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/js-stringify/-/js-stringify-1.0.2.tgz", + "integrity": "sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==", + "dev": true, + "license": "MIT" + }, "node_modules/js-tokens": { "version": "10.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-10.0.0.tgz", @@ -4085,6 +4866,73 @@ "dev": true, "license": "MIT" }, + "node_modules/jscpd": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/jscpd/-/jscpd-4.0.9.tgz", + "integrity": "sha512-fp6Sh42W3mIPoQgZmgYmKDLQzEDnnX2vaGlTN4haILkB2vsi+ewcCHEtWR/2CR/QbsBvAvsNo8U5Sa+p9aHiGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jscpd/badge-reporter": "4.0.5", + "@jscpd/core": "4.0.5", + "@jscpd/finder": "4.0.5", + "@jscpd/html-reporter": "4.0.5", + "@jscpd/tokenizer": "4.0.5", + "colors": "^1.4.0", + "commander": "^5.0.0", + "fs-extra": "^11.2.0", + "jscpd-sarif-reporter": "4.0.7" + }, + "bin": { + "jscpd": "bin/jscpd" + } + }, + "node_modules/jscpd-sarif-reporter": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/jscpd-sarif-reporter/-/jscpd-sarif-reporter-4.0.7.tgz", + "integrity": "sha512-Q/VlfTI/Nbjc8dZ/2pDVIf1aRi2bM2CTYujcAoeYr7brRnS4o5ZeW86W8q7MM7cQu40gezlNckl+E9wKFSMFiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "colors": "^1.4.0", + "fs-extra": "^11.2.0", + "node-sarif-builder": "^3.4.0" + } 
+ }, + "node_modules/jscpd/node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/jsonfile": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.1.tgz", + "integrity": "sha512-zwOTdL3rFQ/lRdBnntKVOX6k5cKJwEc1HdilT71BWEu7J41gXIB2MRp+vxduPSwZJPWBxEzv4yH1wYLJGUHX4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jstransformer": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/jstransformer/-/jstransformer-1.0.0.tgz", + "integrity": "sha512-C9YK3Rf8q6VAPDCCU9fnqo3mAfOH6vUGnMcP4AQAYIEpWtfGLpwOTmZ+igtdK5y+VvI2n3CyYSzy4Qh34eq24A==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-promise": "^2.0.0", + "promise": "^7.0.1" + } + }, "node_modules/just-bash": { "version": "2.14.0", "resolved": "https://registry.npmjs.org/just-bash/-/just-bash-2.14.0.tgz", @@ -4504,6 +5352,84 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/markdown-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-2.0.0.tgz", + "integrity": "sha512-Ezda85ToJUBhM6WGaG6veasyym+Tbs3cMAw/ZhOPqXiYsr0jgocBV3j3nx+4lk47plLlIqjwuTm/ywVI+zjJ/A==", + "dev": true, + "license": "MIT", + "dependencies": { + "repeat-string": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + 
"dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz", + "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/mimic-function": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz", @@ -4653,11 +5579,48 @@ "nxz": "lib/cli/nxz.js" }, "engines": { 
- "node": ">=16.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/oorabona" + "node": ">=16.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/oorabona" + } + }, + "node_modules/node-sarif-builder": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/node-sarif-builder/-/node-sarif-builder-3.4.0.tgz", + "integrity": "sha512-tGnJW6OKRii9u/b2WiUViTJS+h7Apxx17qsMUjsUeNDiMMX5ZFf8F8Fcz7PAQ6omvOxHZtvDTmOYKJQwmfpjeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/sarif": "^2.1.7", + "fs-extra": "^11.1.1" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" } }, "node_modules/obug": { @@ -4675,8 +5638,8 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "devOptional": true, "license": "ISC", - "optional": true, "dependencies": { "wrappy": "1" } @@ -4718,6 +5681,23 @@ "node": ">=14.0.0" } }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, "node_modules/pathe": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", @@ -4725,6 +5705,34 @@ "dev": true, "license": "MIT" }, + "node_modules/pg": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/pg/-/pg-8.20.0.tgz", + "integrity": "sha512-ldhMxz2r8fl/6QkXnBD3CR9/xg694oT6DZQ2s6c/RI28OjtSOpxnPrUCGOBJ46RCUxcWdx3p6kw/xnDHjKvaRA==", + "license": "MIT", + "optional": true, + "dependencies": { + "pg-connection-string": "^2.12.0", + "pg-pool": "^3.13.0", + "pg-protocol": "^1.13.0", + "pg-types": "2.2.0", + "pgpass": "1.0.5" + }, + "engines": { + "node": ">= 16.0.0" + }, + "optionalDependencies": { + "pg-cloudflare": "^1.3.0" + }, + "peerDependencies": { + "pg-native": ">=3.0.1" + }, + "peerDependenciesMeta": { + "pg-native": { + "optional": true + } + } + }, "node_modules/pg-cloudflare": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.3.0.tgz", @@ -4913,17 +5921,184 @@ "node": ">=10" } }, + "node_modules/promise": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", + "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "asap": "~2.0.3" + } + }, + "node_modules/pug": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/pug/-/pug-3.0.4.tgz", + "integrity": "sha512-kFfq5mMzrS7+wrl5pLJzZEzemx34OQ0w4SARfhy/3yxTlhbstsudDwJzhf1hP02yHzbjoVMSXUj/Sz6RNfMyXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "pug-code-gen": "^3.0.4", + "pug-filters": "^4.0.0", + 
"pug-lexer": "^5.0.1", + "pug-linker": "^4.0.0", + "pug-load": "^3.0.0", + "pug-parser": "^6.0.0", + "pug-runtime": "^3.0.1", + "pug-strip-comments": "^2.0.0" + } + }, + "node_modules/pug-attrs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pug-attrs/-/pug-attrs-3.0.0.tgz", + "integrity": "sha512-azINV9dUtzPMFQktvTXciNAfAuVh/L/JCl0vtPCwvOA21uZrC08K/UnmrL+SXGEVc1FwzjW62+xw5S/uaLj6cA==", + "dev": true, + "license": "MIT", + "dependencies": { + "constantinople": "^4.0.1", + "js-stringify": "^1.0.2", + "pug-runtime": "^3.0.0" + } + }, + "node_modules/pug-code-gen": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/pug-code-gen/-/pug-code-gen-3.0.4.tgz", + "integrity": "sha512-6okWYIKdasTyXICyEtvobmTZAVX57JkzgzIi4iRJlin8kmhG+Xry2dsus+Mun/nGCn6F2U49haHI5mkELXB14g==", + "dev": true, + "license": "MIT", + "dependencies": { + "constantinople": "^4.0.1", + "doctypes": "^1.1.0", + "js-stringify": "^1.0.2", + "pug-attrs": "^3.0.0", + "pug-error": "^2.1.0", + "pug-runtime": "^3.0.1", + "void-elements": "^3.1.0", + "with": "^7.0.0" + } + }, + "node_modules/pug-error": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/pug-error/-/pug-error-2.1.0.tgz", + "integrity": "sha512-lv7sU9e5Jk8IeUheHata6/UThZ7RK2jnaaNztxfPYUY+VxZyk/ePVaNZ/vwmH8WqGvDz3LrNYt/+gA55NDg6Pg==", + "dev": true, + "license": "MIT" + }, + "node_modules/pug-filters": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pug-filters/-/pug-filters-4.0.0.tgz", + "integrity": "sha512-yeNFtq5Yxmfz0f9z2rMXGw/8/4i1cCFecw/Q7+D0V2DdtII5UvqE12VaZ2AY7ri6o5RNXiweGH79OCq+2RQU4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "constantinople": "^4.0.1", + "jstransformer": "1.0.0", + "pug-error": "^2.0.0", + "pug-walk": "^2.0.0", + "resolve": "^1.15.1" + } + }, + "node_modules/pug-lexer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/pug-lexer/-/pug-lexer-5.0.1.tgz", + "integrity": 
"sha512-0I6C62+keXlZPZkOJeVam9aBLVP2EnbeDw3An+k0/QlqdwH6rv8284nko14Na7c0TtqtogfWXcRoFE4O4Ff20w==", + "dev": true, + "license": "MIT", + "dependencies": { + "character-parser": "^2.2.0", + "is-expression": "^4.0.0", + "pug-error": "^2.0.0" + } + }, + "node_modules/pug-linker": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pug-linker/-/pug-linker-4.0.0.tgz", + "integrity": "sha512-gjD1yzp0yxbQqnzBAdlhbgoJL5qIFJw78juN1NpTLt/mfPJ5VgC4BvkoD3G23qKzJtIIXBbcCt6FioLSFLOHdw==", + "dev": true, + "license": "MIT", + "dependencies": { + "pug-error": "^2.0.0", + "pug-walk": "^2.0.0" + } + }, + "node_modules/pug-load": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pug-load/-/pug-load-3.0.0.tgz", + "integrity": "sha512-OCjTEnhLWZBvS4zni/WUMjH2YSUosnsmjGBB1An7CsKQarYSWQ0GCVyd4eQPMFJqZ8w9xgs01QdiZXKVjk92EQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "object-assign": "^4.1.1", + "pug-walk": "^2.0.0" + } + }, + "node_modules/pug-parser": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/pug-parser/-/pug-parser-6.0.0.tgz", + "integrity": "sha512-ukiYM/9cH6Cml+AOl5kETtM9NR3WulyVP2y4HOU45DyMim1IeP/OOiyEWRr6qk5I5klpsBnbuHpwKmTx6WURnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "pug-error": "^2.0.0", + "token-stream": "1.0.0" + } + }, + "node_modules/pug-runtime": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/pug-runtime/-/pug-runtime-3.0.1.tgz", + "integrity": "sha512-L50zbvrQ35TkpHwv0G6aLSuueDRwc/97XdY8kL3tOT0FmhgG7UypU3VztfV/LATAvmUfYi4wNxSajhSAeNN+Kg==", + "dev": true, + "license": "MIT" + }, + "node_modules/pug-strip-comments": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pug-strip-comments/-/pug-strip-comments-2.0.0.tgz", + "integrity": "sha512-zo8DsDpH7eTkPHCXFeAk1xZXJbyoTfdPlNR0bK7rpOMuhBYb0f5qUVCO1xlsitYd3w5FQTK7zpNVKb3rZoUrrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "pug-error": "^2.0.0" + } + }, + "node_modules/pug-walk": { + 
"version": "2.0.0", + "resolved": "https://registry.npmjs.org/pug-walk/-/pug-walk-2.0.0.tgz", + "integrity": "sha512-yYELe9Q5q9IQhuvqsZNwA5hfPkMJ8u92bQLIMcsMxf/VADjNtEYptU+inlufAFYcWdHlwNfZOEnOOQrZrcyJCQ==", + "dev": true, + "license": "MIT" + }, "node_modules/pump": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.4.tgz", "integrity": "sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA==", + "devOptional": true, "license": "MIT", - "optional": true, "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" } }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/quickjs-emscripten": { "version": "0.32.0", "resolved": "https://registry.npmjs.org/quickjs-emscripten/-/quickjs-emscripten-0.32.0.tgz", @@ -4993,6 +6168,45 @@ "node": ">= 6" } }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/reprism": { + "version": "0.0.11", + "resolved": "https://registry.npmjs.org/reprism/-/reprism-0.0.11.tgz", + "integrity": "sha512-VsxDR5QxZo08M/3nRypNlScw5r3rKeSOPdU/QhDmu3Ai3BJxHn/qgfXGWQp/tAxUtzwYNo9W6997JZR0tPLZsA==", + "dev": true, + "license": "MIT" + }, + "node_modules/resolve": { + "version": "1.22.12", + "resolved": 
"https://registry.npmjs.org/resolve/-/resolve-1.22.12.tgz", + "integrity": "sha512-TyeJ1zif53BPfHootBGwPRYT1RUt6oGWsaQr8UyZW/eAm9bKoijtvruSDEmZHm92CwS9nj7/fWttqPCgzep8CA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/resolve-pkg-maps": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", @@ -5020,6 +6234,17 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, "node_modules/rfdc": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.4.1.tgz", @@ -5061,6 +6286,30 @@ "@rolldown/binding-win32-x64-msvc": "1.0.0-rc.13" } }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -5140,6 +6389,29 @@ 
"@img/sharp-win32-x64": "0.34.5" } }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/siginfo": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", @@ -5246,6 +6518,13 @@ "node": ">=0.10.0" } }, + "node_modules/spark-md5": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/spark-md5/-/spark-md5-3.0.2.tgz", + "integrity": "sha512-wcFzz9cDfbuqe0FZzfi2or1sgyIrsDwmPwfZC4hiNidPdPINjeUwNfv5kldczoEAcjl9Y1L3SM7Uz2PUEQzxQw==", + "dev": true, + "license": "(WTFPL OR MIT)" + }, "node_modules/split2": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", @@ -5335,6 +6614,16 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/strip-json-comments": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", @@ -5386,6 +6675,19 @@ "node": ">=8" } }, + "node_modules/supports-preserve-symlinks-flag": { 
+ "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/tar-fs": { "version": "2.1.4", "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", @@ -5460,6 +6762,26 @@ "node": ">=14.0.0" } }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/token-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/token-stream/-/token-stream-1.0.0.tgz", + "integrity": "sha512-VSsyNPPW74RpHwR8Fc21uubwHY7wMDeJLys2IX5zJNih+OnAnaifKHo+1LHT7DAdloQ7apeaaWg8l7qnf/TnEg==", + "dev": true, + "license": "MIT" + }, "node_modules/token-types": { "version": "6.1.2", "resolved": "https://registry.npmjs.org/token-types/-/token-types-6.1.2.tgz", @@ -6048,6 +7370,16 @@ "dev": true, "license": "MIT" }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -6061,7 +7393,6 @@ "integrity": 
"sha512-P1PbweD+2/udplnThz3btF4cf6AgPky7kk23RtHUkJIU5BIxwPprhRGmOAHs6FTI7UiGbTNrgNP6jSYD6JaRnw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "lightningcss": "^1.32.0", "picomatch": "^4.0.4", @@ -6140,7 +7471,6 @@ "integrity": "sha512-DBc4Tx0MPNsqb9isoyOq00lHftVx/KIU44QOm2q59npZyLUkENn8TMFsuzuO+4U2FUa9rgbbPt3udrP25GcjXw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/expect": "4.1.3", "@vitest/mocker": "4.1.3", @@ -6225,6 +7555,32 @@ } } }, + "node_modules/void-elements": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", + "integrity": "sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/why-is-node-running": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", @@ -6242,6 +7598,22 @@ "node": ">=8" } }, + "node_modules/with": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/with/-/with-7.0.2.tgz", + "integrity": "sha512-RNGKj82nUPg3g5ygxkQl0R937xLyho1J24ItRCBTr/m1YnZkzJy1hUiHUJrc/VlsDQzsCnInEGSg3bci0Lmd4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.9.6", + "@babel/types": "^7.9.6", + "assert-never": "^1.2.1", + "babel-walk": "3.0.0-canary-5" + }, + "engines": { + "node": ">= 10.0.0" + } + }, "node_modules/wrap-ansi": { "version": "9.0.2", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", 
@@ -6282,8 +7654,8 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "license": "ISC", - "optional": true + "devOptional": true, + "license": "ISC" }, "node_modules/xtend": { "version": "4.0.2", diff --git a/package.json b/package.json index 81f2b6c..9f8cf74 100644 --- a/package.json +++ b/package.json @@ -13,7 +13,8 @@ "shell": "tsx src/shell/deeplake-shell.ts", "test": "vitest run", "typecheck": "tsc --noEmit", - "ci": "npm run typecheck && npm test", + "dup": "jscpd src", + "ci": "npm run typecheck && npm run dup && npm test", "prepare": "husky" }, "lint-staged": { @@ -33,6 +34,7 @@ "@vitest/coverage-v8": "^4.1.3", "esbuild": "^0.28.0", "husky": "^9.1.7", + "jscpd": "^4.0.9", "lint-staged": "^16.4.0", "tsx": "^4.7.0", "typescript": "^6.0.0", From 35a7e87cc8756bebbb9d8a5c33bd2eed03a1a6c7 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 21:34:39 +0000 Subject: [PATCH 27/39] fix(shell): silence [deeplake-sql] trace in one-shot shell bundle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Claude Code's Bash tool merges the child process's stderr into the tool_result string the model sees. When a user or CI had HIVEMIND_TRACE_SQL=1 or HIVEMIND_DEBUG=1 exported, every SQL query issued by the shell bundle during `node shell-bundle -c "..."` wrote a `[deeplake-sql] query start:` line to stderr — and all of it landed in Claude's view of the command output, drowning out the real data. Confirmed on the original baseline_cloud-100 run: 35+ trace lines across the transcripts, interleaved with the bash command results Claude was trying to parse. In several QAs the SQL noise replaced the useful output entirely (exit code 1 + trace lines → Claude concluded "no matches"). Two-part fix: 1. 
Move the TRACE_SQL / DEBUG_FILE_LOG env checks out of the top-level module constants in `src/deeplake-api.ts` and into the `traceSql` function body. The check now evaluates per-call, so callers that import the SDK can still flip the env vars at runtime. (Previously the constants were frozen at module load, so any downstream delete had no effect.) 2. In `src/shell/deeplake-shell.ts`, detect one-shot mode (`-c` in argv) up front and `delete process.env[...]` the four trace variables before doing anything else. Interactive REPL mode keeps the env untouched so developers still get `[deeplake-sql]` lines when they set the vars intentionally. Test coverage in `claude-code/tests/shell-bundle-sql-trace-silence.test.ts`: - Spawns the built `claude-code/bundle/shell/deeplake-shell.js` with fake creds and HIVEMIND_TRACE_SQL / DEEPLAKE_TRACE_SQL / HIVEMIND_DEBUG / DEEPLAKE_DEBUG all set to "1", pointed at an unreachable API URL with a 200ms query timeout. After the SQL query fails (expected), asserts stderr is free of `[deeplake-sql]` lines. - A source-level check confirms `traceSql` reads the env vars inside the function body (runtime) rather than via a frozen top-level `const TRACE_SQL`. Regression verified: stashing both source changes causes the bundle test to fail with the expected `[deeplake-sql] query fail:` line in stderr and the source-level test to report the reintroduced top-level const; restoring the source brings both green. End-to-end verified against `locomo_benchmark/baseline` on a 6-QA subset (conv 0 QAs 3 / 11 / 27 / 32 / 59 / 65). Before fix: 2–4 SQL trace lines leaked into each QA's tool_result stream. After fix: zero leaks across all six transcripts. qa_3 and qa_11 (already correct with fix #1 + fix #2) stay correct; the hard QAs (27, 32, 59, 65) continue to show judge-score variance under Haiku non-determinism but are no longer looking at SQL noise as their "retrieval result". 
--- claude-code/bundle/capture.js | 8 +- claude-code/bundle/commands/auth-login.js | 8 +- claude-code/bundle/pre-tool-use.js | 8 +- claude-code/bundle/session-end.js | 8 +- claude-code/bundle/session-start-setup.js | 8 +- claude-code/bundle/shell/deeplake-shell.js | 16 ++-- .../shell-bundle-sql-trace-silence.test.ts | 86 +++++++++++++++++++ codex/bundle/commands/auth-login.js | 8 +- codex/bundle/pre-tool-use.js | 8 +- codex/bundle/session-start-setup.js | 8 +- codex/bundle/shell/deeplake-shell.js | 16 ++-- codex/bundle/stop.js | 8 +- src/deeplake-api.ts | 15 +++- src/shell/deeplake-shell.ts | 16 +++- 14 files changed, 169 insertions(+), 52 deletions(-) create mode 100644 claude-code/tests/shell-bundle-sql-trace-silence.test.ts diff --git a/claude-code/bundle/capture.js b/claude-code/bundle/capture.js index 82a4aac..2bec8a1 100755 --- a/claude-code/bundle/capture.js +++ b/claude-code/bundle/capture.js @@ -88,18 +88,18 @@ function sqlIdent(name) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/claude-code/bundle/commands/auth-login.js b/claude-code/bundle/commands/auth-login.js index ff5e179..064f11e 100755 --- a/claude-code/bundle/commands/auth-login.js +++ b/claude-code/bundle/commands/auth-login.js @@ -263,18 +263,18 @@ function sqlStr(value) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/claude-code/bundle/pre-tool-use.js b/claude-code/bundle/pre-tool-use.js index 3102cf7..4d7e9de 100755 --- a/claude-code/bundle/pre-tool-use.js +++ b/claude-code/bundle/pre-tool-use.js @@ -88,18 +88,18 @@ function sqlLike(value) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/claude-code/bundle/session-end.js b/claude-code/bundle/session-end.js index 944977c..b253d22 100755 --- a/claude-code/bundle/session-end.js +++ b/claude-code/bundle/session-end.js @@ -88,18 +88,18 @@ function sqlIdent(name) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/claude-code/bundle/session-start-setup.js b/claude-code/bundle/session-start-setup.js index 77621bc..d9b60b8 100755 --- a/claude-code/bundle/session-start-setup.js +++ b/claude-code/bundle/session-start-setup.js @@ -100,18 +100,18 @@ function sqlIdent(name) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/claude-code/bundle/shell/deeplake-shell.js b/claude-code/bundle/shell/deeplake-shell.js index 5872059..10a40c9 100755 --- a/claude-code/bundle/shell/deeplake-shell.js +++ b/claude-code/bundle/shell/deeplake-shell.js @@ -66785,18 +66785,18 @@ function sqlLike(value) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); @@ -69147,6 +69147,13 @@ function createGrepCommand(client, fs3, table, sessionsTable) { // dist/src/shell/deeplake-shell.js async function main() { + const isOneShot = process.argv.includes("-c"); + if (isOneShot) { + delete process.env["HIVEMIND_TRACE_SQL"]; + delete process.env["DEEPLAKE_TRACE_SQL"]; + delete process.env["HIVEMIND_DEBUG"]; + delete process.env["DEEPLAKE_DEBUG"]; + } const config = loadConfig(); if (!config) { process.stderr.write("Deeplake credentials not found.\nSet HIVEMIND_TOKEN + HIVEMIND_ORG_ID in environment, or create ~/.deeplake/credentials.json\n"); @@ -69155,7 +69162,6 @@ async function main() { const table = process.env["HIVEMIND_TABLE"] ?? "memory"; const sessionsTable = process.env["HIVEMIND_SESSIONS_TABLE"] ?? "sessions"; const mount = process.env["HIVEMIND_MOUNT"] ?? "/"; - const isOneShot = process.argv.includes("-c"); const client = new DeeplakeApi(config.token, config.apiUrl, config.orgId, config.workspaceId, table); if (!isOneShot) { process.stderr.write(`Connecting to deeplake://${config.workspaceId}/${table} ... 
diff --git a/claude-code/tests/shell-bundle-sql-trace-silence.test.ts b/claude-code/tests/shell-bundle-sql-trace-silence.test.ts new file mode 100644 index 0000000..2c55dd7 --- /dev/null +++ b/claude-code/tests/shell-bundle-sql-trace-silence.test.ts @@ -0,0 +1,86 @@ +/** + * Bundle-level regression guard for fix #3 — the shell bundle invoked by the + * pre-tool-use hook as `node shell-bundle -c "..."` must not leak + * `[deeplake-sql]` trace output onto stderr. Claude Code's Bash tool merges + * the child process's stderr into the tool_result string the model sees, so + * any trace line shows up as noise in Claude's view of the command output + * (observed in the original `baseline_cloud-100` transcripts, where 35+ + * lines of `[deeplake-sql]` noise polluted bash command results). + * + * The fix has two parts: + * 1. `traceSql` reads the HIVEMIND_TRACE_SQL / HIVEMIND_DEBUG env vars at + * call time (not at module load), so callers can turn tracing off after + * importing the SDK. + * 2. The shell bundle's one-shot entry point (`node ... -c "cmd"`) deletes + * those env vars before opening any SQL connection. + * + * This test spawns the shipped shell bundle with the trace vars set + * explicitly, runs a trivial command that's guaranteed not to touch the + * network (we point the SDK at an unreachable URL and expect the command to + * fail fast), and asserts that the combined stderr output contains zero + * `[deeplake-sql]` lines. If either fix is reverted, stderr fills with the + * trace messages and the test fails. 
+ */ + +import { describe, expect, it } from "vitest"; +import { spawnSync } from "node:child_process"; +import { existsSync } from "node:fs"; +import { join, dirname } from "node:path"; +import { fileURLToPath } from "node:url"; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const BUNDLE_PATH = join(__dirname, "..", "bundle", "shell", "deeplake-shell.js"); + +describe("shell bundle one-shot: SQL trace silence (fix #3)", () => { + it("does not write [deeplake-sql] to stderr even when trace env vars are set", () => { + if (!existsSync(BUNDLE_PATH)) { + throw new Error(`shell bundle missing at ${BUNDLE_PATH} — run 'npm run build' first`); + } + + // Drive the bundle through a path that DEFINITELY calls DeeplakeApi.query() + // (so traceSql fires). Fake creds are good enough — the API call will fail + // fast against an unreachable host, and if the trace silencer regresses, + // the first `[deeplake-sql] query start:` line hits stderr before the + // failure. Point at 127.0.0.1:1 (closed port) with a 200ms timeout so the + // test finishes in well under a second. + const cleanEnv: NodeJS.ProcessEnv = { + PATH: process.env.PATH, + HIVEMIND_TOKEN: "fake-token-for-trace-test", + HIVEMIND_ORG_ID: "fake-org", + HIVEMIND_WORKSPACE_ID: "fake-ws", + HIVEMIND_API_URL: "http://127.0.0.1:1", + HIVEMIND_QUERY_TIMEOUT_MS: "200", + // Pre-silenced env: our fix must keep these from leaking stderr. + HIVEMIND_TRACE_SQL: "1", + DEEPLAKE_TRACE_SQL: "1", + HIVEMIND_DEBUG: "1", + DEEPLAKE_DEBUG: "1", + }; + + const result = spawnSync(process.execPath, [BUNDLE_PATH, "-c", "echo hello"], { + env: cleanEnv, + encoding: "utf-8", + timeout: 15_000, + }); + + const combined = `${result.stdout ?? ""}\n${result.stderr ?? ""}`; + // With the one-shot silencer in place there must be zero SQL trace lines, + // even though the bundle issued SQL queries (that then failed against the + // unreachable host). 
If the fix regresses, expect lines like: + // "[deeplake-sql] query start: SELECT path, size_bytes ..." + expect(combined).not.toContain("[deeplake-sql]"); + }, 20_000); + + it("keeps interactive mode tracing available (env vars not deleted outside one-shot)", () => { + // Sanity check that the one-shot silencing is scoped: traceSql source + // still honours the env vars, so interactive usage (no -c) with + // HIVEMIND_TRACE_SQL=1 would still emit trace lines. We can't easily + // spawn the REPL here, so we just verify the condition in source — this + // guards against an over-eager fix that silences tracing globally. + const { readFileSync } = require("node:fs"); + const apiSource = readFileSync(join(__dirname, "..", "..", "src", "deeplake-api.ts"), "utf-8"); + expect(apiSource).toMatch(/function traceSql\([^)]*\): void \{[\s\S]*process\.env\.HIVEMIND_TRACE_SQL/); + // Ensure the env read is inside the function (runtime), not a top-level const. + expect(apiSource).not.toMatch(/^const TRACE_SQL =/m); + }); +}); diff --git a/codex/bundle/commands/auth-login.js b/codex/bundle/commands/auth-login.js index ff5e179..064f11e 100755 --- a/codex/bundle/commands/auth-login.js +++ b/codex/bundle/commands/auth-login.js @@ -263,18 +263,18 @@ function sqlStr(value) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/codex/bundle/pre-tool-use.js b/codex/bundle/pre-tool-use.js index 5ba57c3..37fb1c2 100755 --- a/codex/bundle/pre-tool-use.js +++ b/codex/bundle/pre-tool-use.js @@ -88,18 +88,18 @@ function sqlLike(value) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/codex/bundle/session-start-setup.js b/codex/bundle/session-start-setup.js index e13a5e2..6a37fb5 100755 --- a/codex/bundle/session-start-setup.js +++ b/codex/bundle/session-start-setup.js @@ -97,18 +97,18 @@ function sqlIdent(name) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/codex/bundle/shell/deeplake-shell.js b/codex/bundle/shell/deeplake-shell.js index 5872059..10a40c9 100755 --- a/codex/bundle/shell/deeplake-shell.js +++ b/codex/bundle/shell/deeplake-shell.js @@ -66785,18 +66785,18 @@ function sqlLike(value) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); @@ -69147,6 +69147,13 @@ function createGrepCommand(client, fs3, table, sessionsTable) { // dist/src/shell/deeplake-shell.js async function main() { + const isOneShot = process.argv.includes("-c"); + if (isOneShot) { + delete process.env["HIVEMIND_TRACE_SQL"]; + delete process.env["DEEPLAKE_TRACE_SQL"]; + delete process.env["HIVEMIND_DEBUG"]; + delete process.env["DEEPLAKE_DEBUG"]; + } const config = loadConfig(); if (!config) { process.stderr.write("Deeplake credentials not found.\nSet HIVEMIND_TOKEN + HIVEMIND_ORG_ID in environment, or create ~/.deeplake/credentials.json\n"); @@ -69155,7 +69162,6 @@ async function main() { const table = process.env["HIVEMIND_TABLE"] ?? "memory"; const sessionsTable = process.env["HIVEMIND_SESSIONS_TABLE"] ?? "sessions"; const mount = process.env["HIVEMIND_MOUNT"] ?? "/"; - const isOneShot = process.argv.includes("-c"); const client = new DeeplakeApi(config.token, config.apiUrl, config.orgId, config.workspaceId, table); if (!isOneShot) { process.stderr.write(`Connecting to deeplake://${config.workspaceId}/${table} ... diff --git a/codex/bundle/stop.js b/codex/bundle/stop.js index b2da8a8..3834f43 100755 --- a/codex/bundle/stop.js +++ b/codex/bundle/stop.js @@ -88,18 +88,18 @@ function sqlIdent(name) { // dist/src/deeplake-api.js var log2 = (msg) => log("sdk", msg); -var TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -var DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql, maxLen = 220) { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } function traceSql(msg) { - if (!TRACE_SQL) + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? 
process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg} `); - if (DEBUG_FILE_LOG) + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log2(msg); } var RETRYABLE_CODES = /* @__PURE__ */ new Set([429, 500, 502, 503, 504]); diff --git a/src/deeplake-api.ts b/src/deeplake-api.ts index 4b1dfed..a003b04 100644 --- a/src/deeplake-api.ts +++ b/src/deeplake-api.ts @@ -6,18 +6,25 @@ import { log as _log } from "./utils/debug.js"; import { sqlStr } from "./utils/sql.js"; const log = (msg: string) => _log("sdk", msg); -const TRACE_SQL = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; -const DEBUG_FILE_LOG = (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; function summarizeSql(sql: string, maxLen = 220): string { const compact = sql.replace(/\s+/g, " ").trim(); return compact.length > maxLen ? `${compact.slice(0, maxLen)}...` : compact; } +/** + * SQL tracing is opt-in and evaluated on every call so callers can flip the + * env vars after module load (e.g. the one-shot shell bundle silences + * `[deeplake-sql]` stderr writes so they don't land in Claude Code's + * Bash-tool result — Claude Code merges child stderr into tool_result). + */ function traceSql(msg: string): void { - if (!TRACE_SQL) return; + const traceEnabled = (process.env.HIVEMIND_TRACE_SQL ?? process.env.DEEPLAKE_TRACE_SQL) === "1" + || (process.env.HIVEMIND_DEBUG ?? process.env.DEEPLAKE_DEBUG) === "1"; + if (!traceEnabled) return; process.stderr.write(`[deeplake-sql] ${msg}\n`); - if (DEBUG_FILE_LOG) log(msg); + const debugFileLog = (process.env.HIVEMIND_DEBUG ?? 
process.env.DEEPLAKE_DEBUG) === "1"; + if (debugFileLog) log(msg); } // ── Retry & concurrency primitives ────────────────────────────────────────── diff --git a/src/shell/deeplake-shell.ts b/src/shell/deeplake-shell.ts index dcdbfa5..e58dfb8 100644 --- a/src/shell/deeplake-shell.ts +++ b/src/shell/deeplake-shell.ts @@ -29,6 +29,20 @@ import { DeeplakeFs } from "./deeplake-fs.js"; import { createGrepCommand } from "./grep-interceptor.js"; async function main(): Promise { + const isOneShot = process.argv.includes("-c"); + + // One-shot mode is what the pre-tool-use hook invokes via `node shell-bundle -c "..."` + // to execute compound bash commands. Claude Code's Bash tool merges the child's + // stderr into the tool_result string Claude sees, so any `[deeplake-sql]` trace + // written to stderr here pollutes the model's view of the command output. + // Silence trace env vars regardless of how the caller set them. + if (isOneShot) { + delete process.env["HIVEMIND_TRACE_SQL"]; + delete process.env["DEEPLAKE_TRACE_SQL"]; + delete process.env["HIVEMIND_DEBUG"]; + delete process.env["DEEPLAKE_DEBUG"]; + } + const config = loadConfig(); if (!config) { process.stderr.write( @@ -42,8 +56,6 @@ async function main(): Promise { const sessionsTable = process.env["HIVEMIND_SESSIONS_TABLE"] ?? "sessions"; const mount = process.env["HIVEMIND_MOUNT"] ?? "/"; - const isOneShot = process.argv.includes("-c"); - const client = new DeeplakeApi( config.token, config.apiUrl, config.orgId, config.workspaceId, table ); From 7c82c2737f824712d6c103efe8079b4abe6bed26 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 21:49:34 +0000 Subject: [PATCH 28/39] test(wiki-worker): source-level coverage for CC + Codex workers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both wiki-worker.ts files sat at 0% branch coverage after the refactor brought them into the PR diff (via the debug-log observability adds). 
This direct source-level test suite drives each worker through every significant branch: Early exit: - zero session events → log + exit, still releaseLock in finally - null rows / null columns treated as empty Happy path: - fetch events + reconstruct JSONL (string rows) - JSONB object rows serialize correctly via JSON.stringify - existing summary on resumed session → parse **JSONL offset** - empty path-SELECT falls back to /sessions/unknown/.jsonl - prompt template expands all 7 placeholders - agent label: "claude_code" for CC, "codex" for Codex - execFileSync options include HIVEMIND_WIKI_WORKER=1 + HIVEMIND_CAPTURE=false to prevent the child from recursing claude -p / codex exec failure: - err.status on Error → logged and upload skipped - err.message fallback when no .status query retry logic: - retries on 500 + the full CloudFlare class (401/403/429/502/503) - non-retryable 400 → throws → main catches and logs fatal - retry exhaustion → throws after the loop - setTimeout spy stops real sleeps finalize + release edges: - finalizeSummary throw logged, releaseLock still runs - releaseLock throw in finally is swallowed, worker completes - whitespace-only summary file skips upload AND finalize Structural note: wiki.log now lives in a separate hooksDir from the worker's tmpDir so the finally block's `rmSync(tmpDir)` does NOT delete the log file before tests can read it back. Aggregate branch coverage on the 14 PR files: 336/365 (92.05%). 
--- claude-code/tests/codex-wiki-worker.test.ts | 358 +++++++++++++++++ claude-code/tests/wiki-worker.test.ts | 422 ++++++++++++++++++++ 2 files changed, 780 insertions(+) create mode 100644 claude-code/tests/codex-wiki-worker.test.ts create mode 100644 claude-code/tests/wiki-worker.test.ts diff --git a/claude-code/tests/codex-wiki-worker.test.ts b/claude-code/tests/codex-wiki-worker.test.ts new file mode 100644 index 0000000..6a4260a --- /dev/null +++ b/claude-code/tests/codex-wiki-worker.test.ts @@ -0,0 +1,358 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import { mkdtempSync, rmSync, writeFileSync, readFileSync, mkdirSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; + +/** + * Source-level tests for src/hooks/codex/wiki-worker.ts. Mirrors the + * CC wiki-worker test: mock fetch + execFileSync + summary-state + + * upload-summary, feed a config file via process.argv[2], drive the + * module through every branch. 
+ * + * Codex-specific differences vs the CC worker: + * - binary key is `codexBin` (not `claudeBin`) + * - invoked as `codex exec --dangerously-bypass-approvals-and-sandbox ` + * - agent label on upload is `"codex"` (not `"claude_code"`) + */ + +const finalizeSummaryMock = vi.fn(); +const releaseLockMock = vi.fn(); +const uploadSummaryMock = vi.fn(); +const execFileSyncMock = vi.fn(); + +vi.mock("../../src/hooks/summary-state.js", () => ({ + finalizeSummary: (...a: any[]) => finalizeSummaryMock(...a), + releaseLock: (...a: any[]) => releaseLockMock(...a), +})); +vi.mock("../../src/hooks/upload-summary.js", () => ({ + uploadSummary: (...a: any[]) => uploadSummaryMock(...a), +})); +vi.mock("node:child_process", async () => { + const actual = await vi.importActual("node:child_process"); + return { ...actual, execFileSync: (...a: any[]) => execFileSyncMock(...a) }; +}); + +const originalFetch = global.fetch; +const fetchMock = vi.fn(); +const originalArgv2 = process.argv[2]; + +let rootDir: string; +let tmpDir: string; +let hooksDir: string; +let configPath: string; + +const defaultConfig = () => ({ + apiUrl: "http://fake.local", + token: "tok", + orgId: "org", + workspaceId: "default", + memoryTable: "memory", + sessionsTable: "sessions", + sessionId: "sid-codex", + userName: "alice", + project: "proj", + tmpDir, + codexBin: "/fake/codex", + wikiLog: join(hooksDir, "wiki.log"), + hooksDir, + promptTemplate: "JSONL=__JSONL__ SUMMARY=__SUMMARY__ SID=__SESSION_ID__ PROJ=__PROJECT__ OFFSET=__PREV_OFFSET__ LINES=__JSONL_LINES__ SRC=__JSONL_SERVER_PATH__", +}); + +function writeConfig(overrides: Partial> = {}): void { + const cfg = { ...defaultConfig(), ...overrides }; + writeFileSync(configPath, JSON.stringify(cfg)); +} + +function jsonResp(body: unknown, ok = true, status = 200): Response { + return { + ok, status, + json: async () => body, + text: async () => typeof body === "string" ? 
body : JSON.stringify(body), + } as Response; +} + +async function runWorker(): Promise { + vi.resetModules(); + // @ts-expect-error + global.fetch = fetchMock; + await import("../../src/hooks/codex/wiki-worker.js"); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); +} + +beforeEach(() => { + rootDir = mkdtempSync(join(tmpdir(), "codex-wiki-worker-test-")); + tmpDir = join(rootDir, "tmp"); + hooksDir = join(rootDir, "hooks"); + mkdirSync(tmpDir, { recursive: true }); + mkdirSync(hooksDir, { recursive: true }); + configPath = join(rootDir, "config.json"); + writeConfig(); + process.argv[2] = configPath; + fetchMock.mockReset(); + finalizeSummaryMock.mockReset(); + releaseLockMock.mockReset(); + uploadSummaryMock.mockReset().mockResolvedValue({ path: "insert", summaryLength: 80, descLength: 15, sql: "..." }); + execFileSyncMock.mockReset(); +}); + +afterEach(() => { + // @ts-expect-error + global.fetch = originalFetch; + process.argv[2] = originalArgv2; + try { rmSync(rootDir, { recursive: true, force: true }); } catch { /* ignore */ } + vi.restoreAllMocks(); +}); + +// ═══ early exit ═════════════════════════════════════════════════════════════ + +describe("codex wiki-worker — no events", () => { + it("exits early when the sessions table has no rows for this session", async () => { + fetchMock.mockResolvedValue(jsonResp({ columns: ["message", "creation_date"], rows: [] })); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("no session events found — exiting"); + expect(execFileSyncMock).not.toHaveBeenCalled(); + expect(uploadSummaryMock).not.toHaveBeenCalled(); + expect(releaseLockMock).toHaveBeenCalledWith("sid-codex"); + }); + + it("handles a response with null rows as empty", async () => { + fetchMock.mockResolvedValue(jsonResp({})); + await runWorker(); + expect(execFileSyncMock).not.toHaveBeenCalled(); + }); +}); + +// ═══ 
happy path ═════════════════════════════════════════════════════════════ + +describe("codex wiki-worker — happy path", () => { + const eventRow = [ + { message: JSON.stringify({ type: "user_message", content: "hello codex" }), creation_date: "2026-04-20T00:00:00Z" }, + ]; + + const mkFetch = (pathRows = 1, hasSummary = false) => { + return fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message, creation_date")) { + return jsonResp({ columns: ["message", "creation_date"], rows: eventRow.map(r => [r.message, r.creation_date]) }); + } + if (sql.startsWith("SELECT DISTINCT path")) { + return jsonResp({ + columns: ["path"], + rows: pathRows > 0 ? [["/sessions/alice/alice_org_default_sid-codex.jsonl"]] : [], + }); + } + if (sql.startsWith("SELECT summary FROM")) { + if (hasSummary) { + return jsonResp({ columns: ["summary"], rows: [["# Session X\n- **JSONL offset**: 7\n\n## What Happened\nprior"]] }); + } + return jsonResp({ columns: ["summary"], rows: [] }); + } + throw new Error(`unexpected query: ${sql}`); + }); + }; + + it("runs `codex exec --dangerously-bypass-approvals-and-sandbox ` and uploads summary", async () => { + mkFetch(); + let capturedJsonl: string | null = null; + execFileSyncMock.mockImplementation((bin: string, args: string[]) => { + expect(bin).toBe("/fake/codex"); + expect(args[0]).toBe("exec"); + expect(args[1]).toBe("--dangerously-bypass-approvals-and-sandbox"); + const prompt = args[2]; + const jsonlPath = prompt.match(/JSONL=(\S+)/)![1]; + capturedJsonl = readFileSync(jsonlPath, "utf-8"); + const summaryPath = prompt.match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "# Session sid-codex\n\n## What Happened\ndone.\n"); + return Buffer.from(""); + }); + await runWorker(); + + expect(capturedJsonl).toContain('"type":"user_message"'); + expect(capturedJsonl).toContain('"content":"hello codex"'); + + // codex exec is invoked with 
HIVEMIND_WIKI_WORKER=1 to prevent the + // child's own capture hook from recursing back into this worker. + const execOpts = execFileSyncMock.mock.calls[0][2]; + expect(execOpts.env.HIVEMIND_WIKI_WORKER).toBe("1"); + expect(execOpts.env.HIVEMIND_CAPTURE).toBe("false"); + + // Upload agent is 'codex' (not 'claude_code') + expect(uploadSummaryMock).toHaveBeenCalledTimes(1); + const params = uploadSummaryMock.mock.calls[0][1]; + expect(params.agent).toBe("codex"); + expect(params.sessionId).toBe("sid-codex"); + + expect(finalizeSummaryMock).toHaveBeenCalledWith("sid-codex", 1); + expect(releaseLockMock).toHaveBeenCalledWith("sid-codex"); + }); + + it("parses JSONL offset from an existing summary on resumed session", async () => { + mkFetch(1, true); + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const prompt = args[2]; + const summaryPath = prompt.match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "# updated\n\n## What Happened\n...\n"); + return Buffer.from(""); + }); + await runWorker(); + const prompt = execFileSyncMock.mock.calls[0][1][2] as string; + expect(prompt).toContain("OFFSET=7"); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("existing summary found, offset=7"); + }); + + it("falls back to /sessions/unknown/ when path SELECT empty", async () => { + mkFetch(0); + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const summaryPath = args[2].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "x\n"); + return Buffer.from(""); + }); + await runWorker(); + const prompt = execFileSyncMock.mock.calls[0][1][2] as string; + expect(prompt).toContain("SRC=/sessions/unknown/sid-codex.jsonl"); + }); + + it("serializes JSONB object rows by stringifying them", async () => { + fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message, creation_date")) { + return 
jsonResp({ + columns: ["message", "creation_date"], + rows: [[{ type: "user_message", content: "obj" }, "t"]], + }); + } + if (sql.startsWith("SELECT DISTINCT path")) return jsonResp({ columns: ["path"], rows: [["/x.jsonl"]] }); + return jsonResp({ columns: ["summary"], rows: [] }); + }); + let capturedJsonl: string | null = null; + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const jsonlPath = args[2].match(/JSONL=(\S+)/)![1]; + capturedJsonl = readFileSync(jsonlPath, "utf-8"); + const summaryPath = args[2].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "x"); + return Buffer.from(""); + }); + await runWorker(); + expect(capturedJsonl).toContain('"type":"user_message"'); + }); +}); + +// ═══ codex exec failure ════════════════════════════════════════════════════ + +describe("codex wiki-worker — codex exec failure", () => { + beforeEach(() => { + fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message")) return jsonResp({ columns: ["message", "creation_date"], rows: [["{}", "t"]] }); + if (sql.startsWith("SELECT DISTINCT path")) return jsonResp({ columns: ["path"], rows: [["/x.jsonl"]] }); + return jsonResp({ columns: ["summary"], rows: [] }); + }); + }); + + it("logs status and skips upload when codex exec throws without producing a summary", async () => { + const err: any = new Error("codex crashed"); + err.status = 99; + execFileSyncMock.mockImplementation(() => { throw err; }); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("codex exec failed: 99"); + expect(log).toContain("no summary file generated"); + expect(uploadSummaryMock).not.toHaveBeenCalled(); + expect(releaseLockMock).toHaveBeenCalled(); + }); + + it("falls back to err.message when err.status is absent", async () => { + execFileSyncMock.mockImplementation(() => { throw new Error("no status here"); }); + 
await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("codex exec failed: no status here"); + }); +}); + +// ═══ query retry logic ═════════════════════════════════════════════════════ + +describe("codex wiki-worker — query retry logic", () => { + beforeEach(() => { + vi.spyOn(global, "setTimeout").mockImplementation(((cb: any) => { + cb(); + return 0 as any; + }) as any); + }); + + it("retries on 500 until success", async () => { + const responses = [ + jsonResp("server error", false, 500), + jsonResp({ columns: ["message", "creation_date"], rows: [] }), + ]; + fetchMock.mockImplementation(async () => responses.shift()!); + await runWorker(); + expect(fetchMock.mock.calls.length).toBeGreaterThanOrEqual(2); + }); + + it("retries on CloudFlare rate-limit class 401/403/429", async () => { + for (const status of [401, 403, 429]) { + fetchMock.mockReset(); + fetchMock + .mockResolvedValueOnce(jsonResp("", false, status)) + .mockResolvedValue(jsonResp({ columns: ["message", "creation_date"], rows: [] })); + await runWorker(); + expect(fetchMock.mock.calls.length).toBeGreaterThanOrEqual(2); + } + }); + + it("throws on 400 (non-retryable) and main catches", async () => { + fetchMock.mockResolvedValue(jsonResp("bad", false, 400)); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toMatch(/fatal: API 400/); + expect(releaseLockMock).toHaveBeenCalled(); + }); +}); + +// ═══ finalize + release + empty summary ═══════════════════════════════════ + +describe("codex wiki-worker — finalize + release edges", () => { + beforeEach(() => { + fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message")) return jsonResp({ columns: ["message", "creation_date"], rows: [["{}", "t"]] }); + if (sql.startsWith("SELECT DISTINCT path")) return jsonResp({ columns: ["path"], rows: [["/x.jsonl"]] 
}); + return jsonResp({ columns: ["summary"], rows: [] }); + }); + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const summaryPath = args[2].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "# s\n\n## What Happened\nX\n"); + return Buffer.from(""); + }); + }); + + it("logs sidecar update failure but still releases lock", async () => { + finalizeSummaryMock.mockImplementation(() => { throw new Error("sidecar boom"); }); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("sidecar update failed: sidecar boom"); + expect(releaseLockMock).toHaveBeenCalled(); + }); + + it("swallows releaseLock throw in finally", async () => { + releaseLockMock.mockImplementation(() => { throw new Error("release boom"); }); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("done"); + }); + + it("skips upload when summary file is whitespace-only", async () => { + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const summaryPath = args[2].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, " \n\n"); + return Buffer.from(""); + }); + await runWorker(); + expect(uploadSummaryMock).not.toHaveBeenCalled(); + expect(finalizeSummaryMock).not.toHaveBeenCalled(); + }); +}); diff --git a/claude-code/tests/wiki-worker.test.ts b/claude-code/tests/wiki-worker.test.ts new file mode 100644 index 0000000..f287cc1 --- /dev/null +++ b/claude-code/tests/wiki-worker.test.ts @@ -0,0 +1,422 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import { mkdtempSync, rmSync, writeFileSync, readFileSync, existsSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; + +/** + * Direct source-level tests for src/hooks/wiki-worker.ts. The module + * reads its config JSON from process.argv[2] at module load, then + * runs main() immediately. 
Each scenario writes a fresh config file + * under a tmp dir, points process.argv[2] at it, wires the mocks, and + * dynamically imports the worker. + * + * Mocks: + * - global.fetch (the query() helper) + * - child_process.execFileSync (the claude -p invocation) + * - summary-state (finalizeSummary + releaseLock) + * - upload-summary (uploadSummary) + * + * fs stays real: the worker writes the reconstructed JSONL and the + * summary markdown to the tmp dir, and main() reads the summary back + * after claude -p has "written" it. The execFileSync mock simulates + * claude by writing the summary file directly, which is how the real + * binary behaves from the worker's perspective. + */ + +const finalizeSummaryMock = vi.fn(); +const releaseLockMock = vi.fn(); +const uploadSummaryMock = vi.fn(); +const execFileSyncMock = vi.fn(); + +vi.mock("../../src/hooks/summary-state.js", () => ({ + finalizeSummary: (...a: any[]) => finalizeSummaryMock(...a), + releaseLock: (...a: any[]) => releaseLockMock(...a), +})); +vi.mock("../../src/hooks/upload-summary.js", () => ({ + uploadSummary: (...a: any[]) => uploadSummaryMock(...a), +})); +vi.mock("node:child_process", async () => { + const actual = await vi.importActual("node:child_process"); + return { ...actual, execFileSync: (...a: any[]) => execFileSyncMock(...a) }; +}); + +const originalFetch = global.fetch; +const fetchMock = vi.fn(); + +const originalArgv2 = process.argv[2]; + +let rootDir: string; // shared parent — NOT removed by the worker +let tmpDir: string; // worker's tmpDir, rmSync'd in cleanup() +let hooksDir: string; // wiki.log lives here; must outlive tmpDir +let configPath: string; + +const defaultConfig = () => ({ + apiUrl: "http://fake.local", + token: "tok", + orgId: "org", + workspaceId: "default", + memoryTable: "memory", + sessionsTable: "sessions", + sessionId: "sid-worker", + userName: "alice", + project: "proj", + tmpDir, + claudeBin: "/fake/claude", + wikiLog: join(hooksDir, "wiki.log"), + hooksDir, + 
promptTemplate: "JSONL=__JSONL__ SUMMARY=__SUMMARY__ SID=__SESSION_ID__ PROJ=__PROJECT__ OFFSET=__PREV_OFFSET__ LINES=__JSONL_LINES__ SRC=__JSONL_SERVER_PATH__", +}); + +function writeConfig(overrides: Partial> = {}): void { + const cfg = { ...defaultConfig(), ...overrides }; + writeFileSync(configPath, JSON.stringify(cfg)); +} + +function jsonResp(body: unknown, ok = true, status = 200): Response { + return { + ok, + status, + json: async () => body, + text: async () => typeof body === "string" ? body : JSON.stringify(body), + } as Response; +} + +async function runWorker(): Promise { + vi.resetModules(); + // @ts-expect-error + global.fetch = fetchMock; + await import("../../src/hooks/wiki-worker.js"); + // Let main() and all its awaits complete. + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); + await new Promise(r => setImmediate(r)); +} + +beforeEach(() => { + rootDir = mkdtempSync(join(tmpdir(), "wiki-worker-test-")); + tmpDir = join(rootDir, "tmp"); + hooksDir = join(rootDir, "hooks"); + // The worker will mkdir hooksDir lazily via wlog, but it needs tmpDir + // to exist for writeFileSync(tmpJsonl, ...). + require("node:fs").mkdirSync(tmpDir, { recursive: true }); + require("node:fs").mkdirSync(hooksDir, { recursive: true }); + configPath = join(rootDir, "config.json"); + writeConfig(); + process.argv[2] = configPath; + fetchMock.mockReset(); + finalizeSummaryMock.mockReset(); + releaseLockMock.mockReset(); + uploadSummaryMock.mockReset().mockResolvedValue({ path: "insert", summaryLength: 100, descLength: 20, sql: "..." 
}); + execFileSyncMock.mockReset(); +}); + +afterEach(() => { + // @ts-expect-error + global.fetch = originalFetch; + process.argv[2] = originalArgv2; + try { rmSync(rootDir, { recursive: true, force: true }); } catch { /* ignore */ } + vi.restoreAllMocks(); +}); + +// ═══ early exit: zero events ═══════════════════════════════════════════════ + +describe("wiki-worker — no events", () => { + it("exits early when the sessions table has no rows for this session", async () => { + fetchMock.mockResolvedValue(jsonResp({ columns: ["message", "creation_date"], rows: [] })); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("no session events found — exiting"); + expect(execFileSyncMock).not.toHaveBeenCalled(); + expect(uploadSummaryMock).not.toHaveBeenCalled(); + expect(finalizeSummaryMock).not.toHaveBeenCalled(); + // The finally block must still release the lock. + expect(releaseLockMock).toHaveBeenCalledWith("sid-worker"); + }); + + it("treats a response with null rows/columns as empty", async () => { + fetchMock.mockResolvedValue(jsonResp({})); + await runWorker(); + expect(execFileSyncMock).not.toHaveBeenCalled(); + expect(releaseLockMock).toHaveBeenCalled(); + }); +}); + +// ═══ happy path: events + claude -p + upload ═══════════════════════════════ + +describe("wiki-worker — happy path", () => { + const eventRows = [ + { message: JSON.stringify({ type: "user_message", content: "hi" }), creation_date: "2026-04-20T00:00:00Z" }, + { message: JSON.stringify({ type: "assistant_message", content: "hello" }), creation_date: "2026-04-20T00:00:01Z" }, + ]; + + const mkFetch = (eventsCol: string[] = ["message", "creation_date"], pathRows = 1, hasSummary = false) => { + let call = 0; + return fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message, creation_date")) { + return jsonResp({ columns: eventsCol, rows: 
eventRows.map(r => [r.message, r.creation_date]) }); + } + if (sql.startsWith("SELECT DISTINCT path")) { + return jsonResp({ + columns: ["path"], + rows: pathRows > 0 ? [["/sessions/alice/alice_org_default_sid-worker.jsonl"]] : [], + }); + } + if (sql.startsWith("SELECT summary FROM")) { + if (hasSummary) { + return jsonResp({ columns: ["summary"], rows: [["# Session X\n- **JSONL offset**: 12\n\n## What Happened\nprior"]] }); + } + return jsonResp({ columns: ["summary"], rows: [] }); + } + call++; + throw new Error(`unexpected query (${call}): ${sql}`); + }); + }; + + it("fetches events, writes JSONL, runs claude -p, uploads, finalizes, releases", async () => { + mkFetch(); + let capturedJsonl: string | null = null; + // Simulate claude -p producing a summary file. We also snapshot the + // reconstructed JSONL here because cleanup() will rmSync tmpDir + // before the test can read it back from disk. + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const promptIdx = args.indexOf("-p") + 1; + const prompt = args[promptIdx]; + const jsonlPath = prompt.match(/JSONL=(\S+)/)![1]; + capturedJsonl = readFileSync(jsonlPath, "utf-8"); + const summaryPath = prompt.match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "# Session sid-worker\n\n## What Happened\nStuff happened.\n"); + return Buffer.from(""); + }); + await runWorker(); + + // JSONL was written with the two events joined (captured before cleanup) + expect(capturedJsonl).not.toBeNull(); + expect(capturedJsonl!.split("\n")).toHaveLength(2); + + // claude -p was called with the prompt template expanded + expect(execFileSyncMock).toHaveBeenCalledTimes(1); + const calledArgs = execFileSyncMock.mock.calls[0][1] as string[]; + expect(calledArgs[0]).toBe("-p"); + expect(calledArgs).toContain("--no-session-persistence"); + expect(calledArgs).toContain("--model"); + expect(calledArgs).toContain("haiku"); + expect(calledArgs).toContain("--permission-mode"); + 
expect(calledArgs).toContain("bypassPermissions"); + + // Prompt template was expanded with real values + const prompt = calledArgs[1]; + expect(prompt).toContain("SID=sid-worker"); + expect(prompt).toContain("PROJ=proj"); + expect(prompt).toContain("LINES=2"); + expect(prompt).toContain("OFFSET=0"); + expect(prompt).toContain("SRC=/sessions/alice/alice_org_default_sid-worker.jsonl"); + + // env flags on execFileSync to prevent runaway recursion + const execOpts = execFileSyncMock.mock.calls[0][2]; + expect(execOpts.env.HIVEMIND_WIKI_WORKER).toBe("1"); + expect(execOpts.env.HIVEMIND_CAPTURE).toBe("false"); + + // upload was called with the full summary + expect(uploadSummaryMock).toHaveBeenCalledTimes(1); + const uploadParams = uploadSummaryMock.mock.calls[0][1]; + expect(uploadParams.tableName).toBe("memory"); + expect(uploadParams.agent).toBe("claude_code"); + expect(uploadParams.text).toContain("## What Happened"); + + // finalize + release + expect(finalizeSummaryMock).toHaveBeenCalledWith("sid-worker", 2); + expect(releaseLockMock).toHaveBeenCalledWith("sid-worker"); + }); + + it("parses JSONL offset from an existing summary on a resumed session", async () => { + mkFetch(undefined, 1, true); + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const summaryPath = args[1].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "# Session sid-worker\n\n## What Happened\ndone.\n"); + return Buffer.from(""); + }); + await runWorker(); + const prompt = execFileSyncMock.mock.calls[0][1][1] as string; + expect(prompt).toContain("OFFSET=12"); + // tmpSummary was pre-seeded with the existing summary so claude -p + // can merge on top. Verify the worker did write it. 
+ const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("existing summary found, offset=12"); + }); + + it("defaults to /sessions/unknown/ when the path SELECT returns no rows", async () => { + mkFetch(undefined, 0); + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const summaryPath = args[1].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "# Session\n\n## What Happened\nfallback.\n"); + return Buffer.from(""); + }); + await runWorker(); + const prompt = execFileSyncMock.mock.calls[0][1][1] as string; + expect(prompt).toContain("SRC=/sessions/unknown/sid-worker.jsonl"); + }); + + it("serializes event rows that arrive as objects (JSONB) instead of strings", async () => { + fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message, creation_date")) { + return jsonResp({ + columns: ["message", "creation_date"], + rows: [ + [{ type: "user_message", content: "hi" }, "2026-04-20T00:00:00Z"], + [{ type: "tool_call", tool_name: "Bash" }, "2026-04-20T00:00:01Z"], + ], + }); + } + if (sql.startsWith("SELECT DISTINCT path")) { + return jsonResp({ columns: ["path"], rows: [["/sessions/alice/x.jsonl"]] }); + } + return jsonResp({ columns: ["summary"], rows: [] }); + }); + let capturedJsonl: string | null = null; + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const jsonlPath = args[1].match(/JSONL=(\S+)/)![1]; + capturedJsonl = readFileSync(jsonlPath, "utf-8"); + const summaryPath = args[1].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "x"); + return Buffer.from(""); + }); + await runWorker(); + expect(capturedJsonl).toContain('"type":"user_message"'); + expect(capturedJsonl).toContain('"type":"tool_call"'); + }); +}); + +// ═══ claude -p failure paths ═══════════════════════════════════════════════ + +describe("wiki-worker — claude -p failure", () => { + it("logs the claude 
exit code and skips the upload when no summary file lands", async () => { + fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message")) return jsonResp({ columns: ["message", "creation_date"], rows: [["{}", "t"]] }); + if (sql.startsWith("SELECT DISTINCT path")) return jsonResp({ columns: ["path"], rows: [["/sessions/x.jsonl"]] }); + return jsonResp({ columns: ["summary"], rows: [] }); + }); + const err: any = new Error("claude boom"); + err.status = 42; + execFileSyncMock.mockImplementation(() => { throw err; }); + await runWorker(); + + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("claude -p failed: 42"); + expect(log).toContain("no summary file generated"); + expect(uploadSummaryMock).not.toHaveBeenCalled(); + expect(finalizeSummaryMock).not.toHaveBeenCalled(); + expect(releaseLockMock).toHaveBeenCalled(); + }); + + it("falls back to err.message when err.status is absent", async () => { + fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message")) return jsonResp({ columns: ["message", "creation_date"], rows: [["{}", "t"]] }); + if (sql.startsWith("SELECT DISTINCT path")) return jsonResp({ columns: ["path"], rows: [["/x.jsonl"]] }); + return jsonResp({ columns: ["summary"], rows: [] }); + }); + execFileSyncMock.mockImplementation(() => { throw new Error("no status"); }); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("claude -p failed: no status"); + }); +}); + +// ═══ query retry logic ═════════════════════════════════════════════════════ + +describe("wiki-worker — query retry logic", () => { + beforeEach(() => { + // Stub setTimeout so retries don't actually sleep. 
+ vi.spyOn(global, "setTimeout").mockImplementation(((cb: any) => { + cb(); + return 0 as any; + }) as any); + }); + + it("retries on 500 and eventually succeeds", async () => { + const responses = [ + jsonResp("server error", false, 500), + jsonResp("server error", false, 500), + jsonResp({ columns: ["message", "creation_date"], rows: [] }), + ]; + fetchMock.mockImplementation(async () => responses.shift()!); + await runWorker(); + // First query to sessions table was retried 2 times before success. + expect(fetchMock.mock.calls.length).toBeGreaterThanOrEqual(3); + expect(releaseLockMock).toHaveBeenCalled(); + }); + + it("retries on 401/403/429/502/503 (CloudFlare rate-limit class)", async () => { + for (const status of [401, 403, 429, 502, 503]) { + fetchMock.mockReset(); + fetchMock + .mockResolvedValueOnce(jsonResp("", false, status)) + .mockResolvedValue(jsonResp({ columns: ["message", "creation_date"], rows: [] })); + await runWorker(); + expect(fetchMock.mock.calls.length).toBeGreaterThanOrEqual(2); + } + }); + + it("throws (and main catches) on a non-retryable 400", async () => { + fetchMock.mockResolvedValue(jsonResp("bad request", false, 400)); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toMatch(/fatal: API 400/); + expect(releaseLockMock).toHaveBeenCalled(); + }); + + it("gives up after exhausting retries on persistent 500", async () => { + fetchMock.mockResolvedValue(jsonResp("still down", false, 500)); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toMatch(/fatal: API 500/); + }); +}); + +// ═══ finalize + release edge cases ═════════════════════════════════════════ + +describe("wiki-worker — finalize + release edge cases", () => { + beforeEach(() => { + fetchMock.mockImplementation(async (_url: string, init: any) => { + const sql = JSON.parse(init.body).query as string; + if (sql.startsWith("SELECT message")) return jsonResp({ columns: 
["message", "creation_date"], rows: [["{}", "t"]] }); + if (sql.startsWith("SELECT DISTINCT path")) return jsonResp({ columns: ["path"], rows: [["/x.jsonl"]] }); + return jsonResp({ columns: ["summary"], rows: [] }); + }); + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const summaryPath = args[1].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, "# s\n## What Happened\nX\n"); + return Buffer.from(""); + }); + }); + + it("logs sidecar update failure but still releases the lock", async () => { + finalizeSummaryMock.mockImplementation(() => { throw new Error("sidecar boom"); }); + await runWorker(); + const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("sidecar update failed: sidecar boom"); + expect(releaseLockMock).toHaveBeenCalled(); + }); + + it("keeps going when releaseLock throws — the finally swallows it", async () => { + releaseLockMock.mockImplementation(() => { throw new Error("release boom"); }); + await runWorker(); + // Worker still completes; the failure is caught in the finally. 
+ const log = readFileSync(join(hooksDir, "wiki.log"), "utf-8"); + expect(log).toContain("done"); + }); + + it("does not upload when the summary file is present but empty", async () => { + execFileSyncMock.mockImplementation((_bin: string, args: string[]) => { + const summaryPath = args[1].match(/SUMMARY=(\S+)/)![1]; + writeFileSync(summaryPath, " \n"); + return Buffer.from(""); + }); + await runWorker(); + expect(uploadSummaryMock).not.toHaveBeenCalled(); + expect(finalizeSummaryMock).not.toHaveBeenCalled(); + }); +}); From 3d154545834363f942bec7410afb318bec7a21d8 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 22:13:05 +0000 Subject: [PATCH 29/39] fix(sql): use ESCAPE '\' on LIKE clauses that consume sqlLike() output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `sqlLike(value)` escapes `_` and `%` in the value by prefixing them with backslashes so callers can interpolate user-controlled strings inside `LIKE 'pattern'` literals. But the Deeplake SQL backend does not treat backslash as the LIKE escape character by default — without an explicit `ESCAPE '\'` clause, `\_` becomes two literal characters in the pattern instead of a literal `_`, so queries whose paths contain underscores silently return nothing. 
Empirically reproduced on the `locomo_benchmark/baseline` workspace: grep -l Caroline /home/.deeplake/memory/sessions/*.json → returns 20+ session paths (works: path has no underscores past the final slash, sqlLike produces '/sessions/%.json') grep -i hike /home/.deeplake/memory/sessions/conv_0_session_*.json → returns (no matches) before this fix — because the SQL becomes path LIKE '/sessions/conv\_0\_session\_%.json' and Deeplake matches `\_` literally against `_` → zero rows → returns real matches after this fix (ESCAPE '\' added, `\_` is now interpreted as literal `_`, matches the underscored paths) Same symptom in the 100-QA post-fix baseline_cloud run: 15 / 100 QA that local baseline answered correctly came back wrong/partial in the cloud, and the tool-call transcripts show repeated `(no matches)` on grep commands whose glob mentions `conv__session_*.json`. The fix appends ` ESCAPE '\'` to every `LIKE '...'` clause that is fed from `sqlLike()`: - src/shell/grep-core.ts:buildPathCondition — both the wildcard path branch and the directory-prefix branch. - src/hooks/virtual-table-query.ts:buildDirFilter — per-dir `path LIKE '/%'` clauses used by listVirtualPathRowsForDirs. - src/hooks/virtual-table-query.ts:findVirtualPaths — both the memoryTable and sessionsTable branches, on both the path and the filename LIKE clauses. Codex/Claude Code find fallbacks and `bash-command-compiler`'s `find_grep` path ultimately call `findVirtualPaths`, so they inherit the fix without a local change. Rebuild updates the 8 Claude Code and 8 Codex bundles. Verified via a targeted reproducer that drives `processPreToolUse` with the same glob commands against the real baseline workspace: all three underscored-glob greps return real matches after the fix, where previously they returned `(no matches)`. 
--- claude-code/bundle/pre-tool-use.js | 8 ++++---- claude-code/bundle/shell/deeplake-shell.js | 4 ++-- codex/bundle/pre-tool-use.js | 8 ++++---- codex/bundle/shell/deeplake-shell.js | 4 ++-- src/hooks/virtual-table-query.ts | 6 +++--- src/shell/grep-core.ts | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/claude-code/bundle/pre-tool-use.js b/claude-code/bundle/pre-tool-use.js index 4d7e9de..d087dd7 100755 --- a/claude-code/bundle/pre-tool-use.js +++ b/claude-code/bundle/pre-tool-use.js @@ -630,13 +630,13 @@ function buildPathCondition(targetPath) { const clean = targetPath.replace(/\/+$/, ""); if (/[*?]/.test(clean)) { const likePattern = sqlLike(clean).replace(/\*/g, "%").replace(/\?/g, "_"); - return `path LIKE '${likePattern}'`; + return `path LIKE '${likePattern}' ESCAPE '\\'`; } const base = clean.split("/").pop() ?? ""; if (base.includes(".")) { return `path = '${sqlStr(clean)}'`; } - return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%')`; + return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%' ESCAPE '\\')`; } async function searchDeeplakeTables(api, memoryTable, sessionsTable, opts) { const { pathFilter, contentScanOnly, likeOp, escapedPattern, prefilterPattern, prefilterPatterns } = opts; @@ -1080,7 +1080,7 @@ function buildDirFilter(dirs) { const cleaned = [...new Set(dirs.map((dir) => dir.replace(/\/+$/, "") || "/"))]; if (cleaned.length === 0 || cleaned.includes("/")) return ""; - const clauses = cleaned.map((dir) => `path LIKE '${sqlLike(dir)}/%'`); + const clauses = cleaned.map((dir) => `path LIKE '${sqlLike(dir)}/%' ESCAPE '\\'`); return ` WHERE ${clauses.join(" OR ")}`; } async function queryUnionRows(api, memoryQuery, sessionsQuery) { @@ -1170,7 +1170,7 @@ async function listVirtualPathRows(api, memoryTable, sessionsTable, dir) { async function findVirtualPaths(api, memoryTable, sessionsTable, dir, filenamePattern) { const normalizedDir = dir.replace(/\/+$/, "") || "/"; const likePath = 
`${sqlLike(normalizedDir === "/" ? "" : normalizedDir)}/%`; - const rows = await queryUnionRows(api, `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 0 AS source_order FROM "${memoryTable}" WHERE path LIKE '${likePath}' AND filename LIKE '${filenamePattern}'`, `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 1 AS source_order FROM "${sessionsTable}" WHERE path LIKE '${likePath}' AND filename LIKE '${filenamePattern}'`); + const rows = await queryUnionRows(api, `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 0 AS source_order FROM "${memoryTable}" WHERE path LIKE '${likePath}' ESCAPE '\\' AND filename LIKE '${filenamePattern}' ESCAPE '\\'`, `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 1 AS source_order FROM "${sessionsTable}" WHERE path LIKE '${likePath}' ESCAPE '\\' AND filename LIKE '${filenamePattern}' ESCAPE '\\'`); return [...new Set(rows.map((row) => row["path"]).filter((value) => typeof value === "string" && value.length > 0))]; } function dedupeRowsByPath(rows) { diff --git a/claude-code/bundle/shell/deeplake-shell.js b/claude-code/bundle/shell/deeplake-shell.js index 10a40c9..0793149 100755 --- a/claude-code/bundle/shell/deeplake-shell.js +++ b/claude-code/bundle/shell/deeplake-shell.js @@ -67317,13 +67317,13 @@ function buildPathCondition(targetPath) { const clean = targetPath.replace(/\/+$/, ""); if (/[*?]/.test(clean)) { const likePattern = sqlLike(clean).replace(/\*/g, "%").replace(/\?/g, "_"); - return `path LIKE '${likePattern}'`; + return `path LIKE '${likePattern}' ESCAPE '\\'`; } const base = clean.split("/").pop() ?? 
""; if (base.includes(".")) { return `path = '${sqlStr(clean)}'`; } - return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%')`; + return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%' ESCAPE '\\')`; } async function searchDeeplakeTables(api, memoryTable, sessionsTable, opts) { const { pathFilter, contentScanOnly, likeOp, escapedPattern, prefilterPattern, prefilterPatterns } = opts; diff --git a/codex/bundle/pre-tool-use.js b/codex/bundle/pre-tool-use.js index 37fb1c2..fb75ccb 100755 --- a/codex/bundle/pre-tool-use.js +++ b/codex/bundle/pre-tool-use.js @@ -616,13 +616,13 @@ function buildPathCondition(targetPath) { const clean = targetPath.replace(/\/+$/, ""); if (/[*?]/.test(clean)) { const likePattern = sqlLike(clean).replace(/\*/g, "%").replace(/\?/g, "_"); - return `path LIKE '${likePattern}'`; + return `path LIKE '${likePattern}' ESCAPE '\\'`; } const base = clean.split("/").pop() ?? ""; if (base.includes(".")) { return `path = '${sqlStr(clean)}'`; } - return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%')`; + return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%' ESCAPE '\\')`; } async function searchDeeplakeTables(api, memoryTable, sessionsTable, opts) { const { pathFilter, contentScanOnly, likeOp, escapedPattern, prefilterPattern, prefilterPatterns } = opts; @@ -1066,7 +1066,7 @@ function buildDirFilter(dirs) { const cleaned = [...new Set(dirs.map((dir) => dir.replace(/\/+$/, "") || "/"))]; if (cleaned.length === 0 || cleaned.includes("/")) return ""; - const clauses = cleaned.map((dir) => `path LIKE '${sqlLike(dir)}/%'`); + const clauses = cleaned.map((dir) => `path LIKE '${sqlLike(dir)}/%' ESCAPE '\\'`); return ` WHERE ${clauses.join(" OR ")}`; } async function queryUnionRows(api, memoryQuery, sessionsQuery) { @@ -1156,7 +1156,7 @@ async function listVirtualPathRows(api, memoryTable, sessionsTable, dir) { async function findVirtualPaths(api, memoryTable, sessionsTable, dir, filenamePattern) { 
const normalizedDir = dir.replace(/\/+$/, "") || "/"; const likePath = `${sqlLike(normalizedDir === "/" ? "" : normalizedDir)}/%`; - const rows = await queryUnionRows(api, `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 0 AS source_order FROM "${memoryTable}" WHERE path LIKE '${likePath}' AND filename LIKE '${filenamePattern}'`, `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 1 AS source_order FROM "${sessionsTable}" WHERE path LIKE '${likePath}' AND filename LIKE '${filenamePattern}'`); + const rows = await queryUnionRows(api, `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 0 AS source_order FROM "${memoryTable}" WHERE path LIKE '${likePath}' ESCAPE '\\' AND filename LIKE '${filenamePattern}' ESCAPE '\\'`, `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 1 AS source_order FROM "${sessionsTable}" WHERE path LIKE '${likePath}' ESCAPE '\\' AND filename LIKE '${filenamePattern}' ESCAPE '\\'`); return [...new Set(rows.map((row) => row["path"]).filter((value) => typeof value === "string" && value.length > 0))]; } function dedupeRowsByPath(rows) { diff --git a/codex/bundle/shell/deeplake-shell.js b/codex/bundle/shell/deeplake-shell.js index 10a40c9..0793149 100755 --- a/codex/bundle/shell/deeplake-shell.js +++ b/codex/bundle/shell/deeplake-shell.js @@ -67317,13 +67317,13 @@ function buildPathCondition(targetPath) { const clean = targetPath.replace(/\/+$/, ""); if (/[*?]/.test(clean)) { const likePattern = sqlLike(clean).replace(/\*/g, "%").replace(/\?/g, "_"); - return `path LIKE '${likePattern}'`; + return `path LIKE '${likePattern}' ESCAPE '\\'`; } const base = clean.split("/").pop() ?? 
""; if (base.includes(".")) { return `path = '${sqlStr(clean)}'`; } - return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%')`; + return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%' ESCAPE '\\')`; } async function searchDeeplakeTables(api, memoryTable, sessionsTable, opts) { const { pathFilter, contentScanOnly, likeOp, escapedPattern, prefilterPattern, prefilterPatterns } = opts; diff --git a/src/hooks/virtual-table-query.ts b/src/hooks/virtual-table-query.ts index 736bb5a..a430a35 100644 --- a/src/hooks/virtual-table-query.ts +++ b/src/hooks/virtual-table-query.ts @@ -53,7 +53,7 @@ function buildInList(paths: string[]): string { function buildDirFilter(dirs: string[]): string { const cleaned = [...new Set(dirs.map(dir => dir.replace(/\/+$/, "") || "/"))]; if (cleaned.length === 0 || cleaned.includes("/")) return ""; - const clauses = cleaned.map((dir) => `path LIKE '${sqlLike(dir)}/%'`); + const clauses = cleaned.map((dir) => `path LIKE '${sqlLike(dir)}/%' ESCAPE '\\'`); return ` WHERE ${clauses.join(" OR ")}`; } @@ -196,8 +196,8 @@ export async function findVirtualPaths( const likePath = `${sqlLike(normalizedDir === "/" ? 
"" : normalizedDir)}/%`; const rows = await queryUnionRows( api, - `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 0 AS source_order FROM "${memoryTable}" WHERE path LIKE '${likePath}' AND filename LIKE '${filenamePattern}'`, - `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 1 AS source_order FROM "${sessionsTable}" WHERE path LIKE '${likePath}' AND filename LIKE '${filenamePattern}'`, + `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 0 AS source_order FROM "${memoryTable}" WHERE path LIKE '${likePath}' ESCAPE '\\' AND filename LIKE '${filenamePattern}' ESCAPE '\\'`, + `SELECT path, NULL::text AS content, NULL::bigint AS size_bytes, '' AS creation_date, 1 AS source_order FROM "${sessionsTable}" WHERE path LIKE '${likePath}' ESCAPE '\\' AND filename LIKE '${filenamePattern}' ESCAPE '\\'`, ); return [...new Set( diff --git a/src/shell/grep-core.ts b/src/shell/grep-core.ts index abad499..6e93c5b 100644 --- a/src/shell/grep-core.ts +++ b/src/shell/grep-core.ts @@ -234,13 +234,13 @@ function buildPathCondition(targetPath: string): string { const clean = targetPath.replace(/\/+$/, ""); if (/[*?]/.test(clean)) { const likePattern = sqlLike(clean).replace(/\*/g, "%").replace(/\?/g, "_"); - return `path LIKE '${likePattern}'`; + return `path LIKE '${likePattern}' ESCAPE '\\'`; } const base = clean.split("/").pop() ?? 
""; if (base.includes(".")) { return `path = '${sqlStr(clean)}'`; } - return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%')`; + return `(path = '${sqlStr(clean)}' OR path LIKE '${sqlLike(clean)}/%' ESCAPE '\\')`; } /** From df780729e1856be3162195fd685815cc24d0b806 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 20 Apr 2026 22:27:14 +0000 Subject: [PATCH 30/39] chore: bump version to 0.6.38 --- .claude-plugin/marketplace.json | 4 ++-- .claude-plugin/plugin.json | 2 +- claude-code/.claude-plugin/plugin.json | 2 +- codex/package.json | 2 +- openclaw/openclaw.plugin.json | 2 +- openclaw/package.json | 2 +- package-lock.json | 4 ++-- package.json | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index 4fd6dc3..427fc2e 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -6,13 +6,13 @@ }, "metadata": { "description": "Cloud-backed persistent shared memory for AI agents powered by Deeplake", - "version": "0.6.37" + "version": "0.6.38" }, "plugins": [ { "name": "hivemind", "description": "Persistent shared memory powered by Deeplake — captures all session activity and provides cross-session, cross-agent memory search", - "version": "0.6.37", + "version": "0.6.38", "source": "./claude-code", "homepage": "https://github.com/activeloopai/hivemind" } diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json index dddddbd..e178805 100644 --- a/.claude-plugin/plugin.json +++ b/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "hivemind", "description": "Cloud-backed persistent memory powered by Deeplake — read, write, and share memory across Claude Code sessions and agents", - "version": "0.6.37", + "version": "0.6.38", "author": { "name": "Activeloop", "url": "https://deeplake.ai" diff --git a/claude-code/.claude-plugin/plugin.json b/claude-code/.claude-plugin/plugin.json index dddddbd..e178805 100644 --- 
a/claude-code/.claude-plugin/plugin.json +++ b/claude-code/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "hivemind", "description": "Cloud-backed persistent memory powered by Deeplake — read, write, and share memory across Claude Code sessions and agents", - "version": "0.6.37", + "version": "0.6.38", "author": { "name": "Activeloop", "url": "https://deeplake.ai" diff --git a/codex/package.json b/codex/package.json index 377c98a..0a42990 100644 --- a/codex/package.json +++ b/codex/package.json @@ -1,6 +1,6 @@ { "name": "hivemind-codex", - "version": "0.6.37", + "version": "0.6.38", "description": "Cloud-backed persistent shared memory for OpenAI Codex CLI powered by Deeplake", "type": "module" } diff --git a/openclaw/openclaw.plugin.json b/openclaw/openclaw.plugin.json index 485df8d..04cdf6c 100644 --- a/openclaw/openclaw.plugin.json +++ b/openclaw/openclaw.plugin.json @@ -23,5 +23,5 @@ } } }, - "version": "0.6.37" + "version": "0.6.38" } diff --git a/openclaw/package.json b/openclaw/package.json index 712bffd..31161cb 100644 --- a/openclaw/package.json +++ b/openclaw/package.json @@ -1,6 +1,6 @@ { "name": "hivemind", - "version": "0.6.37", + "version": "0.6.38", "type": "module", "description": "Hivemind — cloud-backed persistent shared memory for AI agents, powered by DeepLake", "license": "Apache-2.0", diff --git a/package-lock.json b/package-lock.json index c3c6cba..f0ebfcc 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "hivemind", - "version": "0.6.37", + "version": "0.6.38", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "hivemind", - "version": "0.6.37", + "version": "0.6.38", "dependencies": { "deeplake": "^0.3.30", "just-bash": "^2.14.0", diff --git a/package.json b/package.json index 9f8cf74..c503dd2 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "hivemind", - "version": "0.6.37", + "version": "0.6.38", "description": "Cloud-backed persistent shared memory for AI 
agents powered by Deeplake", "type": "module", "bin": { From 2c0d65d58f244d20ca65b123fa892e34b46e57a2 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 22:27:36 +0000 Subject: [PATCH 31/39] fix(output): cap plugin tool results at 8 KB to avoid Claude Code's preview truncation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Claude Code's Bash tool silently persists any tool_result larger than ~16 KB to disk and replaces it with a 2 KB preview plus a path to the persisted file. The model almost never recovers from that replacement: in the locomo `baseline_cloud_100qa_fix123` run (100 QA, all fixes #1 / #2 / #3 applied), 11 / 14 losing QAs that hit the persist path never read the persisted file even once, and finished on the truncated 2 KB preview — which was rarely enough to carry the answer. Typical triggers from that run: - `grep -r Caroline /home/.deeplake/memory/` → 66 KB of dialogue lines because the name appears in nearly every session. - `for f in /.../sessions/conv_0_session_*.json; do grep ...; done` → 926 KB of concatenated grep output (slow-path shell bundle). - `cat /.../sessions/conv_0_session_*.json` (glob over many files) → tens of KB of JSON. This fix introduces `src/utils/output-cap.ts` with `capOutputForClaude(output, {kind})` and applies it on the plugin's exit paths before Claude Code sees the result: - `grep-direct.ts:handleGrepDirect` — caps grep's combined output. - `bash-command-compiler.ts:executeCompiledBashCommand` — caps the final concatenation of compiled segments (cat / ls / find / grep / find_grep, incl. `&&` and `;` pipelines). - `pre-tool-use.ts` direct read path — caps `cat` / `head` / `tail` Bash intercepts. Read-tool intercepts are unaffected: they write content to disk and return a `file_path`, so no size pressure from Claude Code's preview truncation applies. - `pre-tool-use.ts` direct `ls` and `find` fallbacks — capped too. 
Cap is 8 KB (CLAUDE_OUTPUT_CAP_BYTES), comfortably under Claude Code's ~16 KB persist threshold and 4× the 2 KB preview the model used to get. When the cap fires, the output is truncated at a line boundary and the tail gets a short footer: ... [grep truncated: 313 more lines (58.4 KB) elided — refine with '| head -N' or a tighter pattern] The footer names the operation (grep / cat / ls / find / bash) and gives the model an actionable next step. Unit tests in `claude-code/tests/output-cap.test.ts` (8 tests): - No-op for inputs that fit the cap, including empty strings. - Byte size after cap is ≤ CLAUDE_OUTPUT_CAP_BYTES. - Truncation aligns to line boundaries; footer line counts add up to the original total. - Single oversized line (no newline) is byte-sliced with a footer. - Custom `maxBytes` is honoured (no silent 1 KB floor). - Default footer kind is "output" when no kind is passed. - A realistic 400-line grep fixture that exceeds 16 KB gets capped above 4 KB and under the cap — strictly more useful than the 2 KB preview. Bundle rebuild propagates the change to the 8 Claude Code and 8 Codex bundles. Verified empirically via `processPreToolUse` against the real `locomo_benchmark/baseline` workspace: grep -r Caroline /home/.deeplake/memory/ before fix #5: ~66 KB of output, Claude Code truncated to 2 KB. after fix #5: ~7.9 KB (313 lines kept, 313 more elided, footer). grep -r 'Caroline|Melanie' /home/.deeplake/memory/ before: ~70 KB. after: ~7.9 KB with footer reporting 391 lines elided. cat /home/.deeplake/memory/sessions/conv_0_session_1.json ~2 KB — unchanged, well under the cap. Expected impact on the 100-QA baseline_cloud benchmark: 11 QAs that lost points purely because of the 2 KB preview now see up to 8 KB of the same grep output. Combined with fix #4 (19 QAs with (no matches) from SQL LIKE under-escaping), the plugin should close the remaining ~7.5 pt gap to the local-files baseline (75.0 %) and likely match or exceed it. 
--- claude-code/bundle/pre-tool-use.js | 52 +++++++++++++-- claude-code/tests/output-cap.test.ts | 94 ++++++++++++++++++++++++++++ codex/bundle/pre-tool-use.js | 43 ++++++++++++- src/hooks/bash-command-compiler.ts | 3 +- src/hooks/grep-direct.ts | 4 +- src/hooks/pre-tool-use.ts | 13 +++- src/utils/output-cap.ts | 74 ++++++++++++++++++++++ 7 files changed, 271 insertions(+), 12 deletions(-) create mode 100644 claude-code/tests/output-cap.test.ts create mode 100644 src/utils/output-cap.ts diff --git a/claude-code/bundle/pre-tool-use.js b/claude-code/bundle/pre-tool-use.js index d087dd7..84b5152 100755 --- a/claude-code/bundle/pre-tool-use.js +++ b/claude-code/bundle/pre-tool-use.js @@ -795,6 +795,44 @@ async function grepBothTables(api, memoryTable, sessionsTable, params, targetPat return refineGrepMatches(normalized, params); } +// dist/src/utils/output-cap.js +var CLAUDE_OUTPUT_CAP_BYTES = 8 * 1024; +function byteLen(str) { + return Buffer.byteLength(str, "utf8"); +} +function capOutputForClaude(output, options = {}) { + const maxBytes = options.maxBytes ?? CLAUDE_OUTPUT_CAP_BYTES; + if (byteLen(output) <= maxBytes) + return output; + const kind = options.kind ?? "output"; + const footerReserve = 220; + const budget = Math.max(1, maxBytes - footerReserve); + let cut = 0; + let running = 0; + const lines = output.split("\n"); + const keptLines = []; + for (const line of lines) { + const lineBytes = byteLen(line) + 1; + if (running + lineBytes > budget) + break; + keptLines.push(line); + running += lineBytes; + cut += lineBytes; + } + if (keptLines.length === 0) { + const slice = Buffer.from(output, "utf8").slice(0, budget).toString("utf8"); + const footer2 = ` +... 
[${kind} truncated: ${(byteLen(output) / 1024).toFixed(1)} KB total; refine with '| head -N' or a tighter pattern]`; + return slice + footer2; + } + const totalLines = lines.length; + const elidedLines = totalLines - keptLines.length; + const elidedBytes = byteLen(output) - byteLen(keptLines.join("\n")); + const footer = ` +... [${kind} truncated: ${elidedLines} more lines (${(elidedBytes / 1024).toFixed(1)} KB) elided \u2014 refine with '| head -N' or a tighter pattern]`; + return keptLines.join("\n") + footer; +} + // dist/src/hooks/grep-direct.js function splitFirstPipelineStage(cmd) { const input = cmd.trim(); @@ -1034,7 +1072,8 @@ async function handleGrepDirect(api, table, sessionsTable, params) { fixedString: params.fixedString }; const output = await grepBothTables(api, table, sessionsTable, matchParams, params.targetPath); - return output.join("\n") || "(no matches)"; + const joined = output.join("\n") || "(no matches)"; + return capOutputForClaude(joined, { kind: "grep" }); } // dist/src/hooks/virtual-table-query.js @@ -1649,7 +1688,7 @@ async function executeCompiledBashCommand(api, memoryTable, sessionsTable, cmd, continue; } } - return outputs.join("\n"); + return capOutputForClaude(outputs.join("\n"), { kind: "bash" }); } // dist/src/hooks/query-cache.js @@ -2040,7 +2079,8 @@ async function processPreToolUse(input, deps = {}) { const file_path = writeReadCacheFileFn(input.session_id, virtualPath, content); return buildReadDecision(file_path, `[DeepLake direct] ${label} ${virtualPath}`); } - return buildAllowDecision(`echo ${JSON.stringify(content)}`, `[DeepLake direct] ${label} ${virtualPath}`); + const capped = capOutputForClaude(content, { kind: label }); + return buildAllowDecision(`echo ${JSON.stringify(capped)}`, `[DeepLake direct] ${label} ${virtualPath}`); } } if (!lsDir && input.tool_name === "Glob") { @@ -2085,7 +2125,8 @@ async function processPreToolUse(input, deps = {}) { lines.push(name + (info.isDir ? 
"/" : "")); } } - return buildAllowDecision(`echo ${JSON.stringify(lines.join("\n") || "(empty directory)")}`, `[DeepLake direct] ls ${dir}`); + const lsOutput = capOutputForClaude(lines.join("\n") || "(empty directory)", { kind: "ls" }); + return buildAllowDecision(`echo ${JSON.stringify(lsOutput)}`, `[DeepLake direct] ls ${dir}`); } if (input.tool_name === "Bash") { const findMatch = shellCmd.match(/^find\s+(\S+)\s+(?:-type\s+\S+\s+)?-name\s+'([^']+)'/); @@ -2097,7 +2138,8 @@ async function processPreToolUse(input, deps = {}) { let result = paths.join("\n") || ""; if (/\|\s*wc\s+-l\s*$/.test(shellCmd)) result = String(paths.length); - return buildAllowDecision(`echo ${JSON.stringify(result || "(no matches)")}`, `[DeepLake direct] find ${dir}`); + const capped = capOutputForClaude(result || "(no matches)", { kind: "find" }); + return buildAllowDecision(`echo ${JSON.stringify(capped)}`, `[DeepLake direct] find ${dir}`); } } } catch (e) { diff --git a/claude-code/tests/output-cap.test.ts b/claude-code/tests/output-cap.test.ts new file mode 100644 index 0000000..5c59049 --- /dev/null +++ b/claude-code/tests/output-cap.test.ts @@ -0,0 +1,94 @@ +/** + * Cap for large tool outputs (fix #5). + * + * Claude Code's Bash tool silently persists tool_result strings larger + * than ~16 KB to disk and shows the model a 2 KB preview plus a path. + * In the locomo baseline_cloud_100qa_fix123 run, 11 of 14 losing QAs + * that hit this path never recovered the persisted file — the preview + * was too small to carry the answer and the model gave up. `capOutput- + * ForClaude` truncates at line boundaries below Claude Code's threshold + * and replaces the tail with a footer that tells the model how to + * refine the next call. 
+ */ + +import { describe, expect, it } from "vitest"; +import { + CLAUDE_OUTPUT_CAP_BYTES, + capOutputForClaude, +} from "../../src/utils/output-cap.js"; + +describe("capOutputForClaude", () => { + it("returns the input unchanged when it fits under the cap", () => { + const short = "line1\nline2\nline3"; + expect(capOutputForClaude(short)).toBe(short); + }); + + it("is a no-op for an empty string and single short line", () => { + expect(capOutputForClaude("")).toBe(""); + expect(capOutputForClaude("hello")).toBe("hello"); + }); + + it("truncates at a line boundary once the input exceeds the cap", () => { + const line = "x".repeat(100); + const input = Array.from({ length: 200 }, (_, i) => `${i}:${line}`).join("\n"); + const out = capOutputForClaude(input, { kind: "grep" }); + + expect(Buffer.byteLength(out, "utf8")).toBeLessThanOrEqual(CLAUDE_OUTPUT_CAP_BYTES); + // Last surviving line must be whole — no dangling partial line before the footer. + const body = out.split("\n... [")[0]; + expect(body.split("\n").every((l) => l.startsWith(""))).toBe(true); + // Footer names the kind and reports elided line count / byte count. + expect(out).toMatch(/\[grep truncated: \d+ more lines \([\d.]+ KB\) elided — refine with '\| head -N' or a tighter pattern\]/); + }); + + it("reports the correct number of elided lines in the footer", () => { + const line = "x".repeat(100); + const input = Array.from({ length: 500 }, () => line).join("\n"); + const out = capOutputForClaude(input, { kind: "cat" }); + + const bodyLines = out.split("\n... [")[0].split("\n").length; + const footerMatch = out.match(/(\d+) more lines/); + expect(footerMatch).not.toBeNull(); + const elided = Number(footerMatch![1]); + // Body + elided should account for all original lines. + expect(bodyLines + elided).toBe(500); + }); + + it("handles a single oversized line by taking a byte prefix", () => { + // One giant line — no newlines to cut on. 
+ const input = "a".repeat(CLAUDE_OUTPUT_CAP_BYTES * 3); + const out = capOutputForClaude(input, { kind: "grep" }); + + expect(Buffer.byteLength(out, "utf8")).toBeLessThanOrEqual(CLAUDE_OUTPUT_CAP_BYTES); + expect(out).toContain("[grep truncated:"); + expect(out).toMatch(/[\d.]+ KB total/); + }); + + it("uses a custom maxBytes when provided", () => { + const input = Array.from({ length: 20 }, (_, i) => `line${i}:${"x".repeat(80)}`).join("\n"); + const out = capOutputForClaude(input, { maxBytes: 500, kind: "ls" }); + + expect(Buffer.byteLength(out, "utf8")).toBeLessThanOrEqual(500); + expect(out).toContain("[ls truncated:"); + }); + + it("defaults the footer kind to 'output' when no kind is provided", () => { + const input = "x".repeat(CLAUDE_OUTPUT_CAP_BYTES * 2); + const out = capOutputForClaude(input); + expect(out).toContain("[output truncated:"); + }); + + it("produces output well under Claude Code's ~16 KB persist threshold", () => { + const bigGrepLine = (i: number) => + `/sessions/conv_${i % 10}_session_${i}.json:[D${i}:1] Caroline: ${"x".repeat(160)}`; + const input = Array.from({ length: 400 }, (_, i) => bigGrepLine(i)).join("\n"); + const inputSize = Buffer.byteLength(input, "utf8"); + expect(inputSize).toBeGreaterThan(16 * 1024); // confirm the fixture triggers truncation + + const out = capOutputForClaude(input, { kind: "grep" }); + // 2 KB preview was the painful case — we must give the model notably more + // than that, but still fit comfortably below the 16 KB persist threshold. 
+ expect(Buffer.byteLength(out, "utf8")).toBeGreaterThan(4 * 1024); + expect(Buffer.byteLength(out, "utf8")).toBeLessThanOrEqual(CLAUDE_OUTPUT_CAP_BYTES); + }); +}); diff --git a/codex/bundle/pre-tool-use.js b/codex/bundle/pre-tool-use.js index fb75ccb..45ebaf5 100755 --- a/codex/bundle/pre-tool-use.js +++ b/codex/bundle/pre-tool-use.js @@ -781,6 +781,44 @@ async function grepBothTables(api, memoryTable, sessionsTable, params, targetPat return refineGrepMatches(normalized, params); } +// dist/src/utils/output-cap.js +var CLAUDE_OUTPUT_CAP_BYTES = 8 * 1024; +function byteLen(str) { + return Buffer.byteLength(str, "utf8"); +} +function capOutputForClaude(output, options = {}) { + const maxBytes = options.maxBytes ?? CLAUDE_OUTPUT_CAP_BYTES; + if (byteLen(output) <= maxBytes) + return output; + const kind = options.kind ?? "output"; + const footerReserve = 220; + const budget = Math.max(1, maxBytes - footerReserve); + let cut = 0; + let running = 0; + const lines = output.split("\n"); + const keptLines = []; + for (const line of lines) { + const lineBytes = byteLen(line) + 1; + if (running + lineBytes > budget) + break; + keptLines.push(line); + running += lineBytes; + cut += lineBytes; + } + if (keptLines.length === 0) { + const slice = Buffer.from(output, "utf8").slice(0, budget).toString("utf8"); + const footer2 = ` +... [${kind} truncated: ${(byteLen(output) / 1024).toFixed(1)} KB total; refine with '| head -N' or a tighter pattern]`; + return slice + footer2; + } + const totalLines = lines.length; + const elidedLines = totalLines - keptLines.length; + const elidedBytes = byteLen(output) - byteLen(keptLines.join("\n")); + const footer = ` +... 
[${kind} truncated: ${elidedLines} more lines (${(elidedBytes / 1024).toFixed(1)} KB) elided \u2014 refine with '| head -N' or a tighter pattern]`; + return keptLines.join("\n") + footer; +} + // dist/src/hooks/grep-direct.js function splitFirstPipelineStage(cmd) { const input = cmd.trim(); @@ -1020,7 +1058,8 @@ async function handleGrepDirect(api, table, sessionsTable, params) { fixedString: params.fixedString }; const output = await grepBothTables(api, table, sessionsTable, matchParams, params.targetPath); - return output.join("\n") || "(no matches)"; + const joined = output.join("\n") || "(no matches)"; + return capOutputForClaude(joined, { kind: "grep" }); } // dist/src/hooks/virtual-table-query.js @@ -1635,7 +1674,7 @@ async function executeCompiledBashCommand(api, memoryTable, sessionsTable, cmd, continue; } } - return outputs.join("\n"); + return capOutputForClaude(outputs.join("\n"), { kind: "bash" }); } // dist/src/hooks/query-cache.js diff --git a/src/hooks/bash-command-compiler.ts b/src/hooks/bash-command-compiler.ts index 4bf6ce0..68e1534 100644 --- a/src/hooks/bash-command-compiler.ts +++ b/src/hooks/bash-command-compiler.ts @@ -2,6 +2,7 @@ import type { DeeplakeApi } from "../deeplake-api.js"; import { sqlLike } from "../utils/sql.js"; import { type GrepParams, handleGrepDirect, parseBashGrep } from "./grep-direct.js"; import { normalizeContent, refineGrepMatches } from "../shell/grep-core.js"; +import { capOutputForClaude } from "../utils/output-cap.js"; import { listVirtualPathRowsForDirs, readVirtualPathContents, @@ -520,5 +521,5 @@ export async function executeCompiledBashCommand( } } - return outputs.join("\n"); + return capOutputForClaude(outputs.join("\n"), { kind: "bash" }); } diff --git a/src/hooks/grep-direct.ts b/src/hooks/grep-direct.ts index 77427bf..95e15d9 100644 --- a/src/hooks/grep-direct.ts +++ b/src/hooks/grep-direct.ts @@ -7,6 +7,7 @@ import type { DeeplakeApi } from "../deeplake-api.js"; import { grepBothTables, type 
GrepMatchParams } from "../shell/grep-core.js"; +import { capOutputForClaude } from "../utils/output-cap.js"; export interface GrepParams { pattern: string; @@ -229,5 +230,6 @@ export async function handleGrepDirect( }; const output = await grepBothTables(api, table, sessionsTable, matchParams, params.targetPath); - return output.join("\n") || "(no matches)"; + const joined = output.join("\n") || "(no matches)"; + return capOutputForClaude(joined, { kind: "grep" }); } diff --git a/src/hooks/pre-tool-use.ts b/src/hooks/pre-tool-use.ts index 1a3b43d..f55fbc7 100644 --- a/src/hooks/pre-tool-use.ts +++ b/src/hooks/pre-tool-use.ts @@ -23,6 +23,7 @@ import { writeCachedIndexContent, } from "./query-cache.js"; import { isSafe, touchesMemory, rewritePaths } from "./memory-path-utils.js"; +import { capOutputForClaude } from "../utils/output-cap.js"; export { isSafe, touchesMemory, rewritePaths }; @@ -354,11 +355,15 @@ export async function processPreToolUse(input: PreToolUseInput, deps: ClaudePreT content = fromEnd ? lines.slice(-lineLimit).join("\n") : lines.slice(0, lineLimit).join("\n"); } const label = lineLimit > 0 ? (fromEnd ? `tail -${lineLimit}` : `head -${lineLimit}`) : "cat"; + // Read tool writes content to disk and Claude Code reads the file directly, + // so no size pressure; keep full content. Bash intercepts flow through + // Claude Code's 16 KB tool_result threshold so we cap before reaching it. 
if (input.tool_name === "Read") { const file_path = writeReadCacheFileFn(input.session_id, virtualPath, content); return buildReadDecision(file_path, `[DeepLake direct] ${label} ${virtualPath}`); } - return buildAllowDecision(`echo ${JSON.stringify(content)}`, `[DeepLake direct] ${label} ${virtualPath}`); + const capped = capOutputForClaude(content, { kind: label }); + return buildAllowDecision(`echo ${JSON.stringify(capped)}`, `[DeepLake direct] ${label} ${virtualPath}`); } } @@ -402,7 +407,8 @@ export async function processPreToolUse(input: PreToolUseInput, deps: ClaudePreT lines.push(name + (info.isDir ? "/" : "")); } } - return buildAllowDecision(`echo ${JSON.stringify(lines.join("\n") || "(empty directory)")}`, `[DeepLake direct] ls ${dir}`); + const lsOutput = capOutputForClaude(lines.join("\n") || "(empty directory)", { kind: "ls" }); + return buildAllowDecision(`echo ${JSON.stringify(lsOutput)}`, `[DeepLake direct] ls ${dir}`); } if (input.tool_name === "Bash") { @@ -414,7 +420,8 @@ export async function processPreToolUse(input: PreToolUseInput, deps: ClaudePreT const paths = await findVirtualPathsFn(api, table, sessionsTable, dir, namePattern); let result = paths.join("\n") || ""; if (/\|\s*wc\s+-l\s*$/.test(shellCmd)) result = String(paths.length); - return buildAllowDecision(`echo ${JSON.stringify(result || "(no matches)")}`, `[DeepLake direct] find ${dir}`); + const capped = capOutputForClaude(result || "(no matches)", { kind: "find" }); + return buildAllowDecision(`echo ${JSON.stringify(capped)}`, `[DeepLake direct] find ${dir}`); } } } catch (e: any) { diff --git a/src/utils/output-cap.ts b/src/utils/output-cap.ts new file mode 100644 index 0000000..c8db8a4 --- /dev/null +++ b/src/utils/output-cap.ts @@ -0,0 +1,74 @@ +/** + * Cap large tool outputs before they reach Claude Code. + * + * Claude Code's Bash tool silently persists any tool_result larger than + * ~16 KB to disk and replaces it with a 2 KB "preview" + a path to the + * persisted file. 
In the locomo `baseline_cloud_100qa_fix123` run, 11 + * out of 14 losing QAs that hit this path NEVER recovered — the model + * saw a 2 KB slice of grep output and gave up instead of reading the + * persisted file. For our workload 8 KB of meaningful content is + * consistently more useful to the model than 2 KB + a dangling file + * pointer, so we cap the plugin-returned output below that threshold + * and replace the tail with a footer that tells the model how to + * narrow the next call. + * + * The cap is applied at line boundaries to keep grep / cat output + * structure intact. A short footer indicates how many lines / bytes + * were elided and suggests refinements ("pipe to | head -N" or + * "tighten the pattern"). + */ + +export const CLAUDE_OUTPUT_CAP_BYTES = 8 * 1024; + +function byteLen(str: string): number { + return Buffer.byteLength(str, "utf8"); +} + +export interface CapOutputOptions { + /** Hint shown in the footer. Examples: "grep", "cat", "for-loop". */ + kind?: string; + /** Override the cap size (bytes). Defaults to CLAUDE_OUTPUT_CAP_BYTES. */ + maxBytes?: number; +} + +/** + * If `output` fits in the cap, return it unchanged. Otherwise truncate + * at the last newline that keeps the total (including footer) under the + * cap, and append a footer describing what was elided. + */ +export function capOutputForClaude(output: string, options: CapOutputOptions = {}): string { + const maxBytes = options.maxBytes ?? CLAUDE_OUTPUT_CAP_BYTES; + if (byteLen(output) <= maxBytes) return output; + + const kind = options.kind ?? "output"; + // Reserve ~200 bytes for the footer so it always fits within maxBytes. + const footerReserve = 220; + const budget = Math.max(1, maxBytes - footerReserve); + + // Find the last newline before the byte budget. Walk forward building + // the slice so the byte boundary stays valid even for multibyte UTF-8. 
+ let cut = 0; + let running = 0; + const lines = output.split("\n"); + const keptLines: string[] = []; + for (const line of lines) { + const lineBytes = byteLen(line) + 1; // +1 for the newline + if (running + lineBytes > budget) break; + keptLines.push(line); + running += lineBytes; + cut += lineBytes; + } + + if (keptLines.length === 0) { + // A single line is already over budget — take a prefix and mark it. + const slice = Buffer.from(output, "utf8").slice(0, budget).toString("utf8"); + const footer = `\n... [${kind} truncated: ${(byteLen(output) / 1024).toFixed(1)} KB total; refine with '| head -N' or a tighter pattern]`; + return slice + footer; + } + + const totalLines = lines.length; + const elidedLines = totalLines - keptLines.length; + const elidedBytes = byteLen(output) - byteLen(keptLines.join("\n")); + const footer = `\n... [${kind} truncated: ${elidedLines} more lines (${(elidedBytes / 1024).toFixed(1)} KB) elided — refine with '| head -N' or a tighter pattern]`; + return keptLines.join("\n") + footer; +} From a5a1852fa2f5126248f22b39735819a12bc837a0 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 23:36:26 +0000 Subject: [PATCH 32/39] test(config): enforce 90% coverage on fix #1/#4/#5 source files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Append per-file thresholds in vitest.config.ts for the two source files that materially changed in this PR, holding them at the same 90 / 90 / 90 / 90 bar already applied to the grep-dual-table files from PR #60: - src/utils/output-cap.ts — new file, fix #5. Currently at 100 / 100 / 100 / 100 under the tests in claude-code/tests/output-cap.test.ts. - src/hooks/virtual-table-query.ts — rewritten for fix #1 (dual-table index generation) and fix #4 (ESCAPE '\' on LIKE clauses). Currently at 98.9 / 93.2 / 95.8 / 98.9 under claude-code/tests/virtual-table-query.test.ts and claude-code/tests/pre-tool-use-baseline-cloud.test.ts. 
Files left without new thresholds because their changes in this PR are small and localized: - src/hooks/pre-tool-use.ts — added a Read-intercept branch and a writeReadCacheFile helper; the broader file is covered by hooks-source.test.ts which is pre-failing on this branch (unrelated to the fixes in this PR). - src/deeplake-api.ts — moved TRACE_SQL from a module-level const into the traceSql function body (fix #3). - src/shell/deeplake-shell.ts — three env-var deletes in the one-shot entry (fix #3). --- vitest.config.ts | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/vitest.config.ts b/vitest.config.ts index 70df29d..2fb2c0b 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -65,6 +65,23 @@ export default defineConfig({ functions: 80, lines: 80, }, + // fix/index-md-include-sessions — 5-fix PR stacked on PR #61. + // output-cap.ts is new in this PR (fix #5); virtual-table-query.ts was + // heavily modified by fix #1 (index.md builder / fallback) and fix #4 + // (ESCAPE '\' on LIKE clauses). Held at 90 to match the rest of the + // plugin-hot-path files already at that bar. + "src/utils/output-cap.ts": { + statements: 90, + branches: 90, + functions: 90, + lines: 90, + }, + "src/hooks/virtual-table-query.ts": { + statements: 90, + branches: 90, + functions: 90, + lines: 90, + }, }, }, }, From c4c6c0f63b9c8db1c2956327d7ef46443cede090 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 23:41:13 +0000 Subject: [PATCH 33/39] test(grep-core): update buildPathFilter assertion for ESCAPE '\' from fix #4 Fix #4 (`3d15454`) appended `ESCAPE '\'` to every LIKE clause fed by `sqlLike()` so backslash-escaped `_` / `%` match their literal characters on the Deeplake backend. The existing buildPathFilter glob test still asserted the pre-fix SQL. Update the literal string and the regex so the assertion matches the new SQL shape, and annotate the case with a comment explaining why the ESCAPE clause is required. 
--- claude-code/tests/grep-core.test.ts | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/claude-code/tests/grep-core.test.ts b/claude-code/tests/grep-core.test.ts index 2a9a409..51339ff 100644 --- a/claude-code/tests/grep-core.test.ts +++ b/claude-code/tests/grep-core.test.ts @@ -447,11 +447,16 @@ describe("buildPathFilter", () => { ); }); it("uses LIKE matching for glob targets instead of exact file matching", () => { + // Fix #4 appends `ESCAPE '\'` so sqlLike-escaped underscores (`\_`) and + // percent signs (`\%`) in the pattern match their literal characters on + // the Deeplake backend. Without the ESCAPE clause `\_` was treated as + // two literal characters and `/sessions/conv_0_session_*.json`-style + // globs silently returned zero rows. expect(buildPathFilter("/summaries/projects/*.md")).toBe( - " AND path LIKE '/summaries/projects/%.md'", + " AND path LIKE '/summaries/projects/%.md' ESCAPE '\\'", ); const filter = buildPathFilter("/sessions/alice/chat_?.json"); - expect(filter).toMatch(/^ AND path LIKE '\/sessions\/alice\/chat.*\.json'$/); + expect(filter).toMatch(/^ AND path LIKE '\/sessions\/alice\/chat.*\.json' ESCAPE '\\'$/); }); }); From 1f218f7fec715b49835cbf7322398043a8ebda0f Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 23:51:31 +0000 Subject: [PATCH 34/39] ci: run CI workflow on PRs against any base branch The `pull_request.branches:` filter matches on the base branch of a PR. With `[main, dev]` the CI workflow (typecheck + jscpd duplication check + coverage report) silently skipped any PR targeting a long- lived feature branch like `optimizations`. Only "PR Checks" and "Claude PR Review" ran on those PRs, so the coverage and dup report comments never showed up. Dropping the filter runs CI on every PR; the push side stays limited to main/dev so we don't double-run on personal branch pushes. 
--- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ece166b..123a17d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,8 +3,10 @@ name: CI on: push: branches: [main, dev] + # Run on every PR regardless of base branch. The `branches` filter on + # pull_request only matches base, so stacked / long-lived branches + # (e.g. `optimizations`) would otherwise skip the whole CI job. pull_request: - branches: [main, dev] permissions: contents: read From 6ddf6dc7fa399b1a3367febad4e7b88ce9ec4475 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Mon, 20 Apr 2026 23:59:07 +0000 Subject: [PATCH 35/39] test: remove broken optimizations-only test files, align with main MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The merge of `origin/main` pulled in the canonical source refactors for the Codex hooks (session-start / session-start-setup / stop) but the corresponding tests on Davit's `optimizations` branch were written against an intermediate refactor state where helpers like `runCodexSessionStartSetup`, `extractLastAssistantMessage`, `buildCodexStopEntry`, `runCodexStopHook`, and the matching `claude-code/tests/hooks-source.test.ts` imports never made it into the exported surface. CI was failing with 39 `TypeError: X is not a function` errors. 
Two broken test files are deleted (they never existed on `origin/main` and their coverage is already provided by the canonical suites added by PR #62, which landed on `main` and came in with this merge): - `claude-code/tests/hooks-source.test.ts` (894 LOC, 19 / 30 failing) - `codex/tests/codex-source-hooks.test.ts` (1126 LOC, 20 / 28 failing) The canonical replacements from `main` cover the same ground: - `claude-code/tests/capture-hook.test.ts` - `claude-code/tests/session-start-hook.test.ts` - `claude-code/tests/session-start-setup-hook.test.ts` - `claude-code/tests/session-end-hook.test.ts` - `claude-code/tests/codex-capture-hook.test.ts` - `claude-code/tests/codex-session-start-hook.test.ts` - `claude-code/tests/codex-session-start-setup-hook.test.ts` - `claude-code/tests/codex-stop-hook.test.ts` - `claude-code/tests/codex-wiki-worker.test.ts` Two test files also merged in with Davit-branch test blocks that asserted stale session-start prompt wording. Restored to main's version: - `claude-code/tests/session-start.test.ts` — dropped the "steers recall tasks toward index-first exact file reads" block; main's session-start prompt uses different phrasing. - `codex/tests/codex-integration.test.ts` — restored main's assertions ("Do NOT jump straight to JSONL" instead of "Do NOT jump straight to raw session files"). Verified: `npx vitest run` — 837 / 837 tests pass across 39 files. Per-file coverage thresholds unaffected (output-cap.ts 100%, virtual-table-query.ts 98.9% lines, grep-core.ts / grep-direct.ts / grep-interceptor.ts / session-queue.ts all above their bars). 
--- claude-code/tests/hooks-source.test.ts | 894 ------------------ claude-code/tests/session-start.test.ts | 11 - codex/tests/codex-integration.test.ts | 17 +- codex/tests/codex-source-hooks.test.ts | 1126 ----------------------- 4 files changed, 2 insertions(+), 2046 deletions(-) delete mode 100644 claude-code/tests/hooks-source.test.ts delete mode 100644 codex/tests/codex-source-hooks.test.ts diff --git a/claude-code/tests/hooks-source.test.ts b/claude-code/tests/hooks-source.test.ts deleted file mode 100644 index 4dceb1a..0000000 --- a/claude-code/tests/hooks-source.test.ts +++ /dev/null @@ -1,894 +0,0 @@ -import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import type { Config } from "../../src/config.js"; -import type { Credentials } from "../../src/commands/auth.js"; -import { - buildCaptureEntry, - maybeTriggerPeriodicSummary, - runCaptureHook, -} from "../../src/hooks/capture.js"; -import { - extractGrepParams, - getShellCommand, - isSafe, - processPreToolUse, - rewritePaths, - touchesMemory, -} from "../../src/hooks/pre-tool-use.js"; -import { - buildSessionStartAdditionalContext, - runSessionStartHook, -} from "../../src/hooks/session-start.js"; -import { - createPlaceholder, - runSessionStartSetup, -} from "../../src/hooks/session-start-setup.js"; -import { runSessionEndHook } from "../../src/hooks/session-end.js"; -import { isDirectRun } from "../../src/utils/direct-run.js"; - -const baseConfig: Config = { - token: "token", - orgId: "org-1", - orgName: "Acme", - userName: "alice", - workspaceId: "default", - apiUrl: "https://api.example.com", - tableName: "memory", - sessionsTableName: "sessions", - memoryPath: "/tmp/.deeplake/memory", -}; - -const baseCreds: Credentials = { - token: "token", - orgId: "org-1", - orgName: "Acme", - userName: "alice", - workspaceId: "default", - apiUrl: "https://api.example.com", - savedAt: "2026-01-01T00:00:00.000Z", -}; - -let originalArgv1: string | undefined; - -beforeEach(() => { - 
originalArgv1 = process.argv[1]; -}); - -afterEach(() => { - if (originalArgv1 === undefined) delete process.argv[1]; - else process.argv[1] = originalArgv1; - vi.restoreAllMocks(); -}); - -describe("direct-run", () => { - it("returns true when the current entry matches the module path", () => { - process.argv[1] = "/tmp/hook.js"; - expect(isDirectRun("file:///tmp/hook.js")).toBe(true); - }); - - it("returns false when the current entry differs", () => { - process.argv[1] = "/tmp/other.js"; - expect(isDirectRun("file:///tmp/hook.js")).toBe(false); - }); - - it("returns false when there is no entry script", () => { - delete process.argv[1]; - expect(isDirectRun("file:///tmp/hook.js")).toBe(false); - }); - - it("returns false when the meta url cannot be converted to a file path", () => { - process.argv[1] = "/tmp/hook.js"; - expect(isDirectRun("not-a-valid-file-url")).toBe(false); - }); -}); - -describe("claude capture source", () => { - it("builds user, tool, and assistant entries", () => { - const user = buildCaptureEntry({ - session_id: "s1", - hook_event_name: "UserPromptSubmit", - prompt: "hello", - }, "2026-01-01T00:00:00.000Z"); - const tool = buildCaptureEntry({ - session_id: "s1", - hook_event_name: "PostToolUse", - tool_name: "Read", - tool_input: { file_path: "/tmp/a.ts" }, - tool_response: { content: "ok" }, - tool_use_id: "tu-1", - }, "2026-01-01T00:00:01.000Z"); - const assistant = buildCaptureEntry({ - session_id: "s1", - hook_event_name: "Stop", - last_assistant_message: "done", - agent_transcript_path: "/tmp/agent.jsonl", - }, "2026-01-01T00:00:02.000Z"); - - expect(user?.type).toBe("user_message"); - expect(user?.content).toBe("hello"); - expect(tool?.type).toBe("tool_call"); - expect(tool?.tool_name).toBe("Read"); - expect(JSON.parse(tool?.tool_input as string)).toEqual({ file_path: "/tmp/a.ts" }); - expect(assistant?.type).toBe("assistant_message"); - expect(assistant?.agent_transcript_path).toBe("/tmp/agent.jsonl"); - expect(buildCaptureEntry({ 
session_id: "s1" }, "2026-01-01T00:00:00.000Z")).toBeNull(); - }); - - it("triggers periodic summaries only when the threshold is met and the lock is acquired", () => { - const bump = vi.fn(() => ({ totalCount: 10, lastSummaryCount: 4 })); - const load = vi.fn(() => ({ everyNMessages: 5, everyHours: 24 })); - const should = vi.fn(() => true); - const lock = vi.fn(() => true); - const spawn = vi.fn(); - const wiki = vi.fn(); - - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - bumpTotalCountFn: bump as any, - loadTriggerConfigFn: load as any, - shouldTriggerFn: should as any, - tryAcquireLockFn: lock as any, - spawnWikiWorkerFn: spawn as any, - wikiLogFn: wiki as any, - bundleDir: "/tmp/bundle", - }); - - expect(spawn).toHaveBeenCalledWith({ - config: baseConfig, - sessionId: "s1", - cwd: "/repo", - bundleDir: "/tmp/bundle", - reason: "Periodic", - }); - expect(wiki).toHaveBeenCalled(); - }); - - it("suppresses periodic summaries when the lock is held", () => { - const spawn = vi.fn(); - const logFn = vi.fn(); - - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - bumpTotalCountFn: vi.fn(() => ({ totalCount: 10, lastSummaryCount: 4 })) as any, - loadTriggerConfigFn: vi.fn(() => ({ everyNMessages: 5, everyHours: 24 })) as any, - shouldTriggerFn: vi.fn(() => true) as any, - tryAcquireLockFn: vi.fn(() => false) as any, - spawnWikiWorkerFn: spawn as any, - logFn, - }); - - expect(spawn).not.toHaveBeenCalled(); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("lock held")); - }); - - it("returns disabled, no_config, ignored, queued, and flushed states", async () => { - expect(await runCaptureHook({ session_id: "s1", prompt: "hi" }, { - captureEnabled: false, - config: baseConfig, - })).toEqual({ status: "disabled" }); - - expect(await runCaptureHook({ session_id: "s1", prompt: "hi" }, { - config: null, - })).toEqual({ status: "no_config" }); - - expect(await runCaptureHook({ session_id: "s1" }, { - config: baseConfig, - })).toEqual({ status: 
"ignored" }); - - const append = vi.fn(); - const maybe = vi.fn(); - const clear = vi.fn(); - const queued = await runCaptureHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "UserPromptSubmit", - prompt: "hi", - }, { - config: baseConfig, - now: () => "2026-01-01T00:00:00.000Z", - appendQueuedSessionRowFn: append as any, - clearSessionQueryCacheFn: clear as any, - maybeTriggerPeriodicSummaryFn: maybe as any, - }); - expect(queued.status).toBe("queued"); - expect(append).toHaveBeenCalledTimes(1); - expect(clear).toHaveBeenCalledWith("s1"); - expect(maybe).toHaveBeenCalledWith("s1", "/repo", baseConfig); - - const flush = vi.fn(async () => ({ status: "flushed", rows: 2, batches: 1 })); - const flushed = await runCaptureHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "Stop", - last_assistant_message: "done", - }, { - config: baseConfig, - now: () => "2026-01-01T00:00:01.000Z", - appendQueuedSessionRowFn: vi.fn() as any, - flushSessionQueueFn: flush as any, - }); - expect(flushed).toMatchObject({ status: "queued", flushStatus: "flushed" }); - expect(flush).toHaveBeenCalledTimes(1); - }); - - it("suppresses periodic summaries when skipped or when the helper throws", () => { - const spawn = vi.fn(); - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - wikiWorker: true, - spawnWikiWorkerFn: spawn as any, - }); - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - bumpTotalCountFn: vi.fn(() => { throw new Error("boom"); }) as any, - spawnWikiWorkerFn: spawn as any, - logFn: vi.fn(), - }); - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - bumpTotalCountFn: vi.fn(() => ({ totalCount: 1, lastSummaryCount: 1 })) as any, - loadTriggerConfigFn: vi.fn(() => ({ everyNMessages: 5, everyHours: 24 })) as any, - shouldTriggerFn: vi.fn(() => false) as any, - spawnWikiWorkerFn: spawn as any, - }); - expect(spawn).not.toHaveBeenCalled(); - }); - - it("queues assistant events with fallback project and description metadata", async () => 
{ - const append = vi.fn(); - const build = vi.fn((row) => row); - const result = await runCaptureHook({ - session_id: "s1", - last_assistant_message: "done", - }, { - config: baseConfig, - appendQueuedSessionRowFn: append as any, - buildQueuedSessionRowFn: build as any, - maybeTriggerPeriodicSummaryFn: vi.fn() as any, - now: () => "2026-01-01T00:00:00.000Z", - }); - expect(result.status).toBe("queued"); - expect(build).toHaveBeenCalledWith(expect.objectContaining({ - projectName: "unknown", - description: "", - })); - }); -}); - -describe("claude pre-tool source", () => { - it("detects, rewrites, and validates memory commands", () => { - expect(touchesMemory("cat ~/.deeplake/memory/index.md")).toBe(true); - expect(rewritePaths("cat ~/.deeplake/memory/index.md")).toBe("cat /index.md"); - expect(isSafe("cat /index.md | head -20")).toBe(true); - expect(isSafe("python3 -c 'print(1)' /index.md")).toBe(false); - }); - - it("builds shell commands and grep params for supported tools", () => { - expect(getShellCommand("Read", { file_path: "~/.deeplake/memory/index.md" })).toBe("cat /index.md"); - expect(getShellCommand("Read", { path: "~/.deeplake/memory" })).toBe("ls /"); - expect(getShellCommand("Glob", { path: "~/.deeplake/memory/summaries" })).toBe("ls /"); - expect(getShellCommand("Bash", { command: "cat ~/.deeplake/memory/index.md" })).toBe("cat /index.md"); - expect(getShellCommand("Bash", { command: "python3 ~/.deeplake/memory/index.md" })).toBeNull(); - - const grep = extractGrepParams("Grep", { - pattern: "needle", - path: "~/.deeplake/memory/index.md", - output_mode: "count", - "-i": true, - "-n": true, - }, "grep -r needle /"); - expect(grep).toMatchObject({ - pattern: "needle", - targetPath: "/index.md", - ignoreCase: true, - countOnly: true, - lineNumber: true, - }); - }); - - it("returns guidance for unsupported memory commands and passthrough for non-memory commands", async () => { - const guidance = await processPreToolUse({ - session_id: "s1", - 
tool_name: "Bash", - tool_input: { command: "python3 -c 'print(1)' ~/.deeplake/memory" }, - tool_use_id: "tu-1", - }, { - config: baseConfig, - }); - expect(guidance?.command).toContain("RETRY REQUIRED"); - - const passthrough = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "ls -la /tmp" }, - tool_use_id: "tu-2", - }, { - config: baseConfig, - }); - expect(passthrough).toBeNull(); - }); - - it("uses direct grep, direct reads, listings, finds, and shell fallback", async () => { - const grepDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Grep", - tool_input: { - pattern: "needle", - path: "~/.deeplake/memory/index.md", - output_mode: "files_with_matches", - }, - tool_use_id: "tu-1", - }, { - config: baseConfig, - handleGrepDirectFn: vi.fn(async () => "/index.md:needle") as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(grepDecision?.command).toContain("/index.md:needle"); - - const api = { - query: vi.fn(async () => [ - { - path: "/summaries/alice/s1.md", - project: "repo", - description: "session summary", - creation_date: "2026-01-01T00:00:00.000Z", - }, - ]), - }; - const capturedReadFiles: Array<{ path: string; content: string }> = []; - const readDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Read", - tool_input: { file_path: "~/.deeplake/memory/index.md" }, - tool_use_id: "tu-2", - }, { - config: baseConfig, - createApi: vi.fn(() => api as any), - readVirtualPathContentFn: vi.fn(async () => null) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - writeReadCacheFileFn: ((sessionId: string, virtualPath: string, content: string) => { - const tmp = `/tmp/hooks-source.test-${sessionId}${virtualPath}`; - capturedReadFiles.push({ path: tmp, content }); - return tmp; - }) as any, - }); - // Read-tool intercepts return {file_path} (Claude Code's Read expects that); - // the index content is written to disk at that path, 
not inlined in command. - expect(readDecision?.file_path).toBe("/tmp/hooks-source.test-s1/index.md"); - expect(capturedReadFiles).toHaveLength(1); - expect(capturedReadFiles[0]?.content).toContain("# Memory Index"); - - const readDirDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Read", - tool_input: { path: "~/.deeplake/memory" }, - tool_use_id: "tu-2b", - }, { - config: baseConfig, - listVirtualPathRowsFn: vi.fn(async () => [ - { path: "/summaries/alice/s1.md", size_bytes: 42 }, - ]) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(readDirDecision?.command).toContain("summaries/"); - - const lsDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "ls -la ~/.deeplake/memory/summaries" }, - tool_use_id: "tu-3", - }, { - config: baseConfig, - listVirtualPathRowsFn: vi.fn(async () => [ - { path: "/summaries/alice/s1.md", size_bytes: 42 }, - ]) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(lsDecision?.command).toContain("drwxr-xr-x"); - expect(lsDecision?.command).toContain("alice/"); - - const findDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "find ~/.deeplake/memory/summaries -name '*.md'" }, - tool_use_id: "tu-4", - }, { - config: baseConfig, - findVirtualPathsFn: vi.fn(async () => ["/summaries/alice/s1.md"]) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(findDecision?.command).toContain("/summaries/alice/s1.md"); - - const fallback = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "echo hi > ~/.deeplake/memory/test.md" }, - tool_use_id: "tu-5", - }, { - config: null, - shellBundle: "/tmp/deeplake-shell.js", - }); - expect(fallback?.command).toContain('node "/tmp/deeplake-shell.js"'); - }); - - it("reuses cached /index.md content for direct and compiled reads within a session", 
async () => { - const readVirtualPathContentFn = vi.fn(async () => "fresh index"); - const readVirtualPathContentsFn = vi.fn(async (_api, _memory, _sessions, paths: string[]) => new Map( - paths.map((path) => [path, path === "/index.md" ? "fresh index" : null]), - )) as any; - const readCachedIndexContentFn = vi.fn(() => "cached index"); - const writeCachedIndexContentFn = vi.fn(); - - const capturedReadFiles: Array<{ sessionId: string; virtualPath: string; content: string }> = []; - const writeReadCacheFileFn = vi.fn((sessionId: string, virtualPath: string, content: string) => { - capturedReadFiles.push({ sessionId, virtualPath, content }); - return `/tmp/read-cache-${sessionId}${virtualPath}`; - }); - - const directDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Read", - tool_input: { file_path: "~/.deeplake/memory/index.md" }, - tool_use_id: "tu-cache-1", - }, { - config: baseConfig, - readCachedIndexContentFn: readCachedIndexContentFn as any, - writeCachedIndexContentFn: writeCachedIndexContentFn as any, - readVirtualPathContentFn: readVirtualPathContentFn as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - writeReadCacheFileFn: writeReadCacheFileFn as any, - }); - // Read-tool intercepts emit {file_path}; content is materialized to disk - // via writeReadCacheFileFn, not inlined in command. 
- expect(directDecision?.file_path).toBe("/tmp/read-cache-s1/index.md"); - expect(capturedReadFiles).toEqual([ - { sessionId: "s1", virtualPath: "/index.md", content: "cached index" }, - ]); - expect(readVirtualPathContentFn).not.toHaveBeenCalled(); - expect(writeCachedIndexContentFn).toHaveBeenCalledWith("s1", "cached index"); - - const compiledDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "cat ~/.deeplake/memory/index.md && ls ~/.deeplake/memory/summaries" }, - tool_use_id: "tu-cache-2", - }, { - config: baseConfig, - readCachedIndexContentFn: readCachedIndexContentFn as any, - writeCachedIndexContentFn: writeCachedIndexContentFn as any, - readVirtualPathContentsFn, - executeCompiledBashCommandFn: vi.fn(async (_api, _table, _sessions, _cmd, deps) => { - const map = await deps.readVirtualPathContentsFn(_api, _table, _sessions, ["/index.md"]); - return map.get("/index.md") ?? null; - }) as any, - }); - expect(compiledDecision?.command).toContain("cached index"); - expect(readVirtualPathContentsFn).not.toHaveBeenCalled(); - }); - - it("supports head, tail, wc -l, empty directories, and shell fallback after direct-query errors", async () => { - const contentReader = vi.fn(async () => "line1\nline2\nline3"); - - const headDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "head -2 ~/.deeplake/memory/index.md" }, - tool_use_id: "tu-6", - }, { - config: baseConfig, - readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: contentReader as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(headDecision?.command).toContain("line1\\nline2"); - - const tailDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "tail -2 ~/.deeplake/memory/index.md" }, - tool_use_id: "tu-7", - }, { - config: baseConfig, - 
readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: contentReader as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(tailDecision?.command).toContain("line2\\nline3"); - - const wcDecision = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "wc -l ~/.deeplake/memory/index.md" }, - tool_use_id: "tu-8", - }, { - config: baseConfig, - readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: contentReader as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(wcDecision?.command).toContain("3 /index.md"); - - const emptyDir = await processPreToolUse({ - session_id: "s1", - tool_name: "Glob", - tool_input: { path: "~/.deeplake/memory/empty" }, - tool_use_id: "tu-9", - }, { - config: baseConfig, - listVirtualPathRowsFn: vi.fn(async () => []) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(emptyDir?.command).toContain("(empty directory)"); - - const fallback = await processPreToolUse({ - session_id: "s1", - tool_name: "Grep", - tool_input: { - pattern: "needle", - path: "~/.deeplake/memory/index.md", - }, - tool_use_id: "tu-10", - }, { - config: baseConfig, - handleGrepDirectFn: vi.fn(async () => { throw new Error("boom"); }) as any, - shellBundle: "/tmp/deeplake-shell.js", - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(fallback?.description).toContain("DeepLake shell"); - }); - - it("returns compiled output when the bash compiler can satisfy the command directly", async () => { - const decision = await processPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_input: { command: "cat ~/.deeplake/memory/index.md && ls ~/.deeplake/memory/summaries" }, - tool_use_id: "tu-11", - }, { - config: baseConfig, - executeCompiledBashCommandFn: vi.fn(async () 
=> "compiled output") as any, - }); - - expect(decision?.command).toContain("compiled output"); - expect(decision?.description).toContain("DeepLake compiled"); - }); -}); - -describe("claude session start source", () => { - it("builds logged-in and logged-out context with update notices", () => { - const loggedIn = buildSessionStartAdditionalContext({ - authCommand: "/tmp/auth-login.js", - creds: baseCreds, - currentVersion: "0.6.0", - latestVersion: "0.6.0", - }); - const loggedOut = buildSessionStartAdditionalContext({ - authCommand: "/tmp/auth-login.js", - creds: null, - currentVersion: "0.6.0", - latestVersion: "0.7.0", - }); - - expect(loggedIn).toContain("Logged in to Deeplake"); - expect(loggedIn).toContain("Hivemind v0.6.0"); - expect(loggedIn).toContain("resolve it against that session's own date/date_time metadata"); - expect(loggedIn).toContain("convert the final answer into an absolute month/date/year"); - expect(loggedIn).toContain("answer with the smallest exact phrase supported by memory"); - expect(loggedIn).toContain('Do NOT answer "not found"'); - expect(loggedOut).toContain("Not logged in to Deeplake"); - expect(loggedOut).toContain("update available"); - }); - - it("skips in wiki-worker mode and backfills usernames when needed", async () => { - expect(await runSessionStartHook({}, { wikiWorker: true })).toBeNull(); - - const save = vi.fn(); - const result = await runSessionStartHook({}, { - creds: { ...baseCreds, userName: undefined }, - saveCredentialsFn: save as any, - currentVersion: "0.6.0", - latestVersion: "0.6.0", - authCommand: "/tmp/auth-login.js", - }); - - expect(result?.hookSpecificOutput.additionalContext).toContain("Logged in to Deeplake"); - expect(save).toHaveBeenCalledTimes(1); - }); - - it("logs unauthenticated startup and still returns context", async () => { - const logFn = vi.fn(); - const result = await runSessionStartHook({}, { - creds: null, - currentVersion: null, - latestVersion: null, - authCommand: 
"/tmp/auth-login.js", - logFn, - }); - - expect(result?.hookSpecificOutput.additionalContext).toContain("Not logged in to Deeplake"); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("no credentials")); - }); - - it("falls back to org id and default workspace when names are missing", () => { - const context = buildSessionStartAdditionalContext({ - authCommand: "/tmp/auth-login.js", - creds: { ...baseCreds, orgName: undefined, workspaceId: undefined } as any, - currentVersion: null, - latestVersion: null, - }); - expect(context).toContain("org-1"); - expect(context).toContain("workspace: default"); - expect(context).not.toContain("Hivemind v"); - }); - - it("logs authenticated startup without backfilling when the username is already present", async () => { - const logFn = vi.fn(); - const save = vi.fn(); - await runSessionStartHook({}, { - creds: { ...baseCreds, orgName: undefined }, - saveCredentialsFn: save as any, - currentVersion: "0.6.0", - latestVersion: null, - authCommand: "/tmp/auth-login.js", - logFn, - }); - expect(save).not.toHaveBeenCalled(); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("org=org-1")); - }); -}); - -describe("claude session start setup source", () => { - it("creates placeholders only when summaries do not already exist", async () => { - const query = vi.fn(async (sql: string) => { - if (sql.startsWith("SELECT path")) return []; - return []; - }); - const api = { query } as any; - - await createPlaceholder(api, "memory", "s1", "/repo", "alice", "Acme", "default"); - - expect(query).toHaveBeenCalledTimes(2); - expect(String(query.mock.calls[1]?.[0])).toContain('INSERT INTO "memory"'); - expect(String(query.mock.calls[1]?.[0])).toContain("/summaries/alice/s1.md"); - expect(String(query.mock.calls[1]?.[0])).toContain("/sessions/alice/alice_Acme_default_s1.jsonl"); - - query.mockReset(); - query.mockResolvedValueOnce([{ path: "/summaries/alice/s1.md" }]); - await createPlaceholder(api, "memory", "s1", "/repo", 
"alice", "Acme", "default"); - expect(query).toHaveBeenCalledTimes(1); - }); - - it("handles no credentials, disabled session writes, auth failures, and update notices", async () => { - expect(await runSessionStartSetup({ session_id: "s1" }, { - creds: null, - })).toEqual({ status: "no_credentials" }); - - const createApi = vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable: vi.fn(async () => undefined), - query: vi.fn(async () => []), - }) as any); - const placeholder = vi.fn(async () => undefined); - - await runSessionStartSetup({ session_id: "s1", cwd: "/repo" }, { - creds: baseCreds, - config: baseConfig, - createApi, - isSessionWriteDisabledFn: vi.fn(() => true) as any, - createPlaceholderFn: placeholder as any, - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => "0.7.0") as any, - execSyncFn: vi.fn() as any, - }); - expect(placeholder).toHaveBeenCalledTimes(1); - expect(createApi).toHaveBeenCalledTimes(1); - - const markDisabled = vi.fn(); - const stderr = vi.spyOn(process.stderr, "write").mockImplementation(() => true as any); - await runSessionStartSetup({ session_id: "s1", cwd: "/repo" }, { - creds: { ...baseCreds, autoupdate: false }, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable: vi.fn(async () => { throw new Error("403 Forbidden"); }), - query: vi.fn(async () => []), - }) as any), - isSessionWriteDisabledFn: vi.fn(() => false) as any, - isSessionWriteAuthErrorFn: vi.fn(() => true) as any, - markSessionWriteDisabledFn: markDisabled as any, - tryAcquireSessionDrainLockFn: vi.fn(() => (() => undefined)) as any, - createPlaceholderFn: vi.fn(async () => undefined) as any, - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => "0.7.0") as any, - }); - expect(markDisabled).toHaveBeenCalledTimes(1); - expect(stderr).toHaveBeenCalledWith(expect.stringContaining("update 
available")); - }); - - it("backfills usernames, logs drained queues, and handles setup/version failures", async () => { - const save = vi.fn(); - const logFn = vi.fn(); - const wikiLogFn = vi.fn(); - await runSessionStartSetup({ session_id: "s1", cwd: "/repo" }, { - creds: { ...baseCreds, userName: undefined, autoupdate: true }, - saveCredentialsFn: save as any, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable: vi.fn(async () => undefined), - query: vi.fn(async () => []), - }) as any), - drainSessionQueuesFn: vi.fn(async () => ({ - queuedSessions: 1, - flushedSessions: 1, - rows: 3, - batches: 1, - })) as any, - isSessionWriteDisabledFn: vi.fn(() => false) as any, - tryAcquireSessionDrainLockFn: vi.fn(() => (() => undefined)) as any, - createPlaceholderFn: vi.fn(async () => undefined) as any, - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => "0.6.0") as any, - logFn, - wikiLogFn, - }); - expect(save).toHaveBeenCalledTimes(1); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("drained 1 queued session")); - expect(logFn).toHaveBeenCalledWith("version up to date: 0.6.0"); - expect(wikiLogFn).not.toHaveBeenCalledWith(expect.stringContaining("failed")); - - await runSessionStartSetup({ session_id: "s1", cwd: "/repo" }, { - creds: baseCreds, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => { throw new Error("boom"); }), - }) as any), - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => { throw new Error("offline"); }) as any, - logFn, - wikiLogFn, - }); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("setup failed: boom")); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("version check failed: offline")); - expect(wikiLogFn).toHaveBeenCalledWith(expect.stringContaining("failed for s1: boom")); - }); - - it("skips duplicate queue drains 
while another session-start setup is already handling sessions", async () => { - const logFn = vi.fn(); - const createPlaceholderFn = vi.fn(async () => undefined); - const ensureSessionsTable = vi.fn(async () => undefined); - const drainSessionQueuesFn = vi.fn(async () => ({ - queuedSessions: 1, - flushedSessions: 1, - rows: 1, - batches: 1, - })); - - await runSessionStartSetup({ session_id: "s1", cwd: "/repo" }, { - creds: baseCreds, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable, - query: vi.fn(async () => []), - }) as any), - isSessionWriteDisabledFn: vi.fn(() => false) as any, - tryAcquireSessionDrainLockFn: vi.fn(() => null) as any, - drainSessionQueuesFn: drainSessionQueuesFn as any, - createPlaceholderFn: createPlaceholderFn as any, - getInstalledVersionFn: vi.fn(() => null) as any, - logFn, - }); - - expect(ensureSessionsTable).not.toHaveBeenCalled(); - expect(drainSessionQueuesFn).not.toHaveBeenCalled(); - expect(createPlaceholderFn).toHaveBeenCalledTimes(1); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("sessions drain already in progress")); - }); - - it("handles capture-disabled, successful autoupdate, and skipped setup work", async () => { - const stderr = vi.spyOn(process.stderr, "write").mockImplementation(() => true as any); - const execSyncFn = vi.fn(); - const createPlaceholderFn = vi.fn(); - await runSessionStartSetup({ session_id: "s1", cwd: "/repo" }, { - creds: baseCreds, - config: baseConfig, - captureEnabled: false, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - }) as any), - createPlaceholderFn: createPlaceholderFn as any, - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => "0.7.0") as any, - execSyncFn: execSyncFn as any, - }); - expect(createPlaceholderFn).not.toHaveBeenCalled(); - expect(execSyncFn).toHaveBeenCalledTimes(1); - 
expect(stderr).toHaveBeenCalledWith(expect.stringContaining("auto-updated")); - - await expect(runSessionStartSetup({ session_id: "", cwd: "/repo" }, { - creds: baseCreds, - config: baseConfig, - getInstalledVersionFn: vi.fn(() => null) as any, - })).resolves.toEqual({ status: "complete" }); - }); - - it("treats non-auth session setup errors as setup failures", async () => { - const wikiLogFn = vi.fn(); - const createPlaceholderFn = vi.fn(); - await runSessionStartSetup({ session_id: "s1", cwd: "/repo" }, { - creds: baseCreds, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable: vi.fn(async () => { throw new Error("boom"); }), - }) as any), - isSessionWriteDisabledFn: vi.fn(() => false) as any, - isSessionWriteAuthErrorFn: vi.fn(() => false) as any, - tryAcquireSessionDrainLockFn: vi.fn(() => (() => undefined)) as any, - createPlaceholderFn: createPlaceholderFn as any, - getInstalledVersionFn: vi.fn(() => null) as any, - wikiLogFn, - }); - expect(createPlaceholderFn).not.toHaveBeenCalled(); - expect(wikiLogFn).toHaveBeenCalledWith(expect.stringContaining("failed for s1: boom")); - }); - - it("skips in wiki-worker mode and handles zero-drain session writes", async () => { - expect(await runSessionStartSetup({ session_id: "s1" }, { - wikiWorker: true, - })).toEqual({ status: "skipped" }); - - const createPlaceholderFn = vi.fn(async () => undefined); - await runSessionStartSetup({ session_id: "s1", cwd: undefined as any }, { - creds: baseCreds, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable: vi.fn(async () => undefined), - }) as any), - drainSessionQueuesFn: vi.fn(async () => ({ - queuedSessions: 0, - flushedSessions: 0, - rows: 0, - batches: 0, - })) as any, - isSessionWriteDisabledFn: vi.fn(() => false) as any, - tryAcquireSessionDrainLockFn: vi.fn(() => (() => undefined)) as any, - createPlaceholderFn: createPlaceholderFn as any, - 
getInstalledVersionFn: vi.fn(() => null) as any, - }); - expect(createPlaceholderFn).toHaveBeenCalledWith(expect.anything(), "memory", "s1", "", "alice", "Acme", "default"); - }); -}); - -describe("claude session end source", () => { - it("skips when disabled, returns no_config, and flushes when active", async () => { - expect(await runSessionEndHook({ session_id: "s1" }, { - captureEnabled: false, - config: baseConfig, - })).toEqual({ status: "skipped" }); - - expect(await runSessionEndHook({ session_id: "s1" }, { - config: null, - })).toEqual({ status: "no_config" }); - - const flush = vi.fn(async () => ({ status: "flushed", rows: 3, batches: 1 })); - const spawn = vi.fn(); - const wiki = vi.fn(); - const result = await runSessionEndHook({ session_id: "s1", cwd: "/repo" }, { - config: baseConfig, - flushSessionQueueFn: flush as any, - spawnWikiWorkerFn: spawn as any, - wikiLogFn: wiki as any, - bundleDir: "/tmp/bundle", - }); - - expect(result).toEqual({ status: "flushed", flushStatus: "flushed" }); - expect(flush).toHaveBeenCalledTimes(1); - expect(spawn).toHaveBeenCalledWith({ - config: baseConfig, - sessionId: "s1", - cwd: "/repo", - bundleDir: "/tmp/bundle", - reason: "SessionEnd", - }); - expect(wiki).toHaveBeenCalled(); - }); -}); diff --git a/claude-code/tests/session-start.test.ts b/claude-code/tests/session-start.test.ts index 858f544..0d311cf 100644 --- a/claude-code/tests/session-start.test.ts +++ b/claude-code/tests/session-start.test.ts @@ -137,17 +137,6 @@ describe("claude-code integration: session-start.js (sync hook)", () => { expect(ctx).toMatch(/Logged in to Deeplake|Not logged in to Deeplake/); }); - it("steers recall tasks toward index-first exact file reads", () => { - const raw = runHook("session-start.js", baseInput); - const parsed = JSON.parse(raw); - const ctx = parsed.hookSpecificOutput.additionalContext; - expect(ctx).toContain("Always read index.md first"); - expect(ctx).toContain("read that exact summary or session file directly"); - 
expect(ctx).toContain("Do NOT probe unrelated local paths"); - expect(ctx).toContain("answer with the smallest exact phrase supported by memory"); - expect(ctx).toContain("convert the final answer into an absolute month/date/year"); - }); - it("completes within 3s with no credentials (no server calls)", () => { const start = Date.now(); runHook("session-start.js", baseInput); diff --git a/codex/tests/codex-integration.test.ts b/codex/tests/codex-integration.test.ts index 44b41dd..d399a9d 100644 --- a/codex/tests/codex-integration.test.ts +++ b/codex/tests/codex-integration.test.ts @@ -106,27 +106,14 @@ describe("codex integration: session-start", () => { expect(raw).toContain("Do NOT spawn subagents"); }); - it("context includes raw session file warning", () => { + it("context includes JSONL warning", () => { const raw = runHook("session-start.js", { session_id: "test-session-004", cwd: "/tmp", hook_event_name: "SessionStart", model: "gpt-5.2", }); - expect(raw).toContain("Do NOT jump straight to raw session files"); - }); - - it("context steers recall tasks to index-first exact file reads", () => { - const raw = runHook("session-start.js", { - session_id: "test-session-004b", - cwd: "/tmp", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }); - expect(raw).toContain("read that exact summary or session path directly"); - expect(raw).toContain("Do NOT probe unrelated local paths"); - expect(raw).toContain("answer with the smallest exact phrase supported by memory"); - expect(raw).toContain("convert the final answer into an absolute month/date/year"); + expect(raw).toContain("Do NOT jump straight to JSONL"); }); }); diff --git a/codex/tests/codex-source-hooks.test.ts b/codex/tests/codex-source-hooks.test.ts deleted file mode 100644 index 263a473..0000000 --- a/codex/tests/codex-source-hooks.test.ts +++ /dev/null @@ -1,1126 +0,0 @@ -import { afterEach, describe, expect, it, vi } from "vitest"; -import type { Config } from "../../src/config.js"; -import type { 
Credentials } from "../../src/commands/auth.js"; -import { - buildCodexCaptureEntry, - maybeTriggerPeriodicSummary, - runCodexCaptureHook, -} from "../../src/hooks/codex/capture.js"; -import { - buildUnsupportedGuidance, - isSafe, - processCodexPreToolUse, - rewritePaths, - touchesMemory, -} from "../../src/hooks/codex/pre-tool-use.js"; -import { - buildCodexSessionStartContext, - runCodexSessionStartHook, -} from "../../src/hooks/codex/session-start.js"; -import { - createPlaceholder, - runCodexSessionStartSetup, -} from "../../src/hooks/codex/session-start-setup.js"; -import { - buildCodexStopEntry, - extractLastAssistantMessage, - runCodexStopHook, -} from "../../src/hooks/codex/stop.js"; - -const baseConfig: Config = { - token: "token", - orgId: "org-1", - orgName: "Acme", - userName: "alice", - workspaceId: "default", - apiUrl: "https://api.example.com", - tableName: "memory", - sessionsTableName: "sessions", - memoryPath: "/tmp/.deeplake/memory", -}; - -const baseCreds: Credentials = { - token: "token", - orgId: "org-1", - orgName: "Acme", - userName: "alice", - workspaceId: "default", - apiUrl: "https://api.example.com", - savedAt: "2026-01-01T00:00:00.000Z", -}; - -afterEach(() => { - vi.restoreAllMocks(); -}); - -describe("codex capture source", () => { - it("builds user/tool entries and ignores unsupported events", () => { - const user = buildCodexCaptureEntry({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "UserPromptSubmit", - model: "gpt-5.2", - prompt: "hello", - }, "2026-01-01T00:00:00.000Z"); - const tool = buildCodexCaptureEntry({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "PostToolUse", - model: "gpt-5.2", - tool_name: "Bash", - tool_use_id: "tu-1", - tool_input: { command: "ls" }, - tool_response: { stdout: "ok" }, - }, "2026-01-01T00:00:01.000Z"); - - expect(user?.type).toBe("user_message"); - expect(tool?.type).toBe("tool_call"); - expect(buildCodexCaptureEntry({ - session_id: "s1", - cwd: "/repo", - hook_event_name: 
"Stop", - model: "gpt-5.2", - }, "2026-01-01T00:00:02.000Z")).toBeNull(); - }); - - it("triggers periodic summaries and queues capture rows", async () => { - const spawn = vi.fn(); - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - bumpTotalCountFn: vi.fn(() => ({ totalCount: 10, lastSummaryCount: 4 })) as any, - loadTriggerConfigFn: vi.fn(() => ({ everyNMessages: 5, everyHours: 24 })) as any, - shouldTriggerFn: vi.fn(() => true) as any, - tryAcquireLockFn: vi.fn(() => true) as any, - spawnCodexWikiWorkerFn: spawn as any, - wikiLogFn: vi.fn() as any, - bundleDir: "/tmp/bundle", - }); - expect(spawn).toHaveBeenCalledTimes(1); - - const append = vi.fn(); - const clear = vi.fn(); - const queued = await runCodexCaptureHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "PostToolUse", - model: "gpt-5.2", - tool_name: "Bash", - tool_use_id: "tu-1", - tool_input: { command: "ls" }, - tool_response: { stdout: "ok" }, - }, { - config: baseConfig, - appendQueuedSessionRowFn: append as any, - clearSessionQueryCacheFn: clear as any, - }); - expect(queued.status).toBe("queued"); - expect(append).toHaveBeenCalledTimes(1); - expect(clear).not.toHaveBeenCalled(); - - await runCodexCaptureHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "UserPromptSubmit", - model: "gpt-5.2", - prompt: "hi", - }, { - config: baseConfig, - appendQueuedSessionRowFn: vi.fn() as any, - clearSessionQueryCacheFn: clear as any, - }); - expect(clear).toHaveBeenCalledWith("s1"); - }); - - it("returns disabled, no_config, and ignored states", async () => { - expect(await runCodexCaptureHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "UserPromptSubmit", - model: "gpt-5.2", - prompt: "hi", - }, { - captureEnabled: false, - config: baseConfig, - })).toEqual({ status: "disabled" }); - - expect(await runCodexCaptureHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "UserPromptSubmit", - model: "gpt-5.2", - prompt: "hi", - }, { - config: null, - 
})).toEqual({ status: "no_config" }); - - expect(await runCodexCaptureHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "Unknown", - model: "gpt-5.2", - }, { - config: baseConfig, - })).toEqual({ status: "ignored" }); - }); - - it("suppresses periodic summaries when skipped or when the helper throws", () => { - const spawn = vi.fn(); - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - wikiWorker: true, - spawnCodexWikiWorkerFn: spawn as any, - }); - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - bumpTotalCountFn: vi.fn(() => { throw new Error("boom"); }) as any, - spawnCodexWikiWorkerFn: spawn as any, - logFn: vi.fn(), - }); - maybeTriggerPeriodicSummary("s1", "/repo", baseConfig, { - bumpTotalCountFn: vi.fn(() => ({ totalCount: 1, lastSummaryCount: 1 })) as any, - loadTriggerConfigFn: vi.fn(() => ({ everyNMessages: 5, everyHours: 24 })) as any, - shouldTriggerFn: vi.fn(() => false) as any, - spawnCodexWikiWorkerFn: spawn as any, - }); - expect(spawn).not.toHaveBeenCalled(); - }); -}); - -describe("codex pre-tool source", () => { - it("detects, rewrites, and validates memory commands", () => { - expect(touchesMemory("cat ~/.deeplake/memory/index.md")).toBe(true); - expect(rewritePaths("cat $HOME/.deeplake/memory/index.md")).toBe("cat /index.md"); - expect(isSafe("grep -r needle /")).toBe(true); - expect(isSafe("node -e '1' /")).toBe(false); - expect(isSafe("echo $(uname)")).toBe(false); - expect(buildUnsupportedGuidance()).toContain("Do NOT use python"); - }); - - it("passes through non-memory commands and guides unsafe ones", async () => { - expect(await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-1", - tool_input: { command: "ls -la /tmp" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - })).toEqual({ action: "pass" }); - - const guidance = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-2", - tool_input: { command: "python3 
-c 'print(1)' ~/.deeplake/memory" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - }); - expect(guidance.action).toBe("guide"); - expect(guidance.output).toContain("Only bash builtins"); - }); - - it("uses direct read, direct grep, and shell fallback", async () => { - const api = { - query: vi.fn(async () => [ - { - path: "/summaries/alice/s1.md", - project: "repo", - description: "session summary", - creation_date: "2026-01-01T00:00:00.000Z", - }, - ]), - }; - const readDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-1", - tool_input: { command: "cat ~/.deeplake/memory/index.md | head -20" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - createApi: vi.fn(() => api as any), - readVirtualPathContentFn: vi.fn(async () => null) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(readDecision.action).toBe("block"); - expect(readDecision.output).toContain("# Memory Index"); - - const grepDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-2", - tool_input: { command: "grep -r needle ~/.deeplake/memory/" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - handleGrepDirectFn: vi.fn(async () => "/index.md:needle") as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(grepDecision.output).toContain("/index.md:needle"); - - const fallback = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-3", - tool_input: { command: "echo hi > ~/.deeplake/memory/test.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: null, - runVirtualShellFn: vi.fn(() => "ok") as any, - }); - expect(fallback).toEqual({ - action: "block", - output: "ok", - rewrittenCommand: "echo hi > /test.md", - 
}); - }); - - it("supports head, tail, wc -l, find counts, missing ls paths, and default empty-shell output", async () => { - const contentReader = vi.fn(async () => "line1\nline2\nline3"); - - const headDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-4", - tool_input: { command: "head -2 ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: contentReader as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(headDecision.output).toBe("line1\nline2"); - - const tailDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-5", - tool_input: { command: "tail -2 ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: contentReader as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(tailDecision.output).toBe("line2\nline3"); - - const wcDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-6", - tool_input: { command: "wc -l ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: contentReader as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(wcDecision.output).toBe("3 /index.md"); - - const findDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-7", - tool_input: { command: "find 
~/.deeplake/memory/summaries -name '*.md' | wc -l" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - findVirtualPathsFn: vi.fn(async () => ["/summaries/alice/s1.md", "/summaries/alice/s2.md"]) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(findDecision.output).toBe("2"); - - const missingLs = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-8", - tool_input: { command: "ls ~/.deeplake/memory/missing" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - listVirtualPathRowsFn: vi.fn(async () => []) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(missingLs.output).toContain("No such file or directory"); - - const emptyShell = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-9", - tool_input: { command: "echo hi > ~/.deeplake/memory/test.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - runVirtualShellFn: vi.fn(() => "") as any, - }); - expect(emptyShell.output).toContain("Command returned empty"); - }); - - it("returns compiled output when the bash compiler can satisfy the command directly", async () => { - const decision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-10", - tool_input: { command: "cat ~/.deeplake/memory/index.md && ls ~/.deeplake/memory/summaries" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - executeCompiledBashCommandFn: vi.fn(async () => "compiled output") as any, - }); - - expect(decision).toEqual({ - action: "block", - output: "compiled output", - rewrittenCommand: "cat /index.md && ls /summaries", - }); - }); - - it("reuses cached /index.md content for direct and compiled reads within a session", async () => { - const 
readVirtualPathContentFn = vi.fn(async () => "fresh index"); - const readVirtualPathContentsFn = vi.fn(async (_api, _memory, _sessions, paths: string[]) => new Map( - paths.map((path) => [path, path === "/index.md" ? "fresh index" : null]), - )) as any; - const readCachedIndexContentFn = vi.fn(() => "cached index"); - const writeCachedIndexContentFn = vi.fn(); - - const directDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-cache-1", - tool_input: { command: "cat ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - readCachedIndexContentFn: readCachedIndexContentFn as any, - writeCachedIndexContentFn: writeCachedIndexContentFn as any, - readVirtualPathContentFn: readVirtualPathContentFn as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(directDecision.output).toBe("cached index"); - expect(readVirtualPathContentFn).not.toHaveBeenCalled(); - expect(writeCachedIndexContentFn).toHaveBeenCalledWith("s1", "cached index"); - - const compiledDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-cache-2", - tool_input: { command: "cat ~/.deeplake/memory/index.md && ls ~/.deeplake/memory/summaries" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - readCachedIndexContentFn: readCachedIndexContentFn as any, - writeCachedIndexContentFn: writeCachedIndexContentFn as any, - readVirtualPathContentsFn, - executeCompiledBashCommandFn: vi.fn(async (_api, _table, _sessions, _cmd, deps) => { - const map = await deps.readVirtualPathContentsFn(_api, _table, _sessions, ["/index.md"]); - return map.get("/index.md") ?? 
null; - }) as any, - }); - expect(compiledDecision.output).toBe("cached index"); - expect(readVirtualPathContentsFn).not.toHaveBeenCalled(); - }); - - it("covers plain cat, directory listings, non-count find, grep fallback, and direct-query exceptions", async () => { - const plainCat = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-11", - tool_input: { command: "cat ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: vi.fn(async () => "line1\nline2") as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(plainCat).toEqual({ - action: "block", - output: "line1\nline2", - rewrittenCommand: "cat /index.md", - }); - - const listed = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-12", - tool_input: { command: "ls ~/.deeplake/memory/summaries" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - listVirtualPathRowsFn: vi.fn(async () => [ - { path: "/other/place.md", size_bytes: 1 }, - { path: "/summaries/", size_bytes: 0 }, - { path: "/summaries/alice/s1.md", size_bytes: 10 }, - { path: "/summaries/bob/nested/file.md", size_bytes: 20 }, - ]) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(listed.output).toContain("alice/"); - expect(listed.output).toContain("bob/"); - expect(listed.output).not.toContain("other"); - - const rootLs = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-13", - tool_input: { command: "ls ~/.deeplake/memory" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - listVirtualPathRowsFn: vi.fn(async () => [ - { path: "/", size_bytes: 0 }, - { path: 
"/root.md", size_bytes: 5 }, - { path: "/summaries/alice/s1.md", size_bytes: 10 }, - ]) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(rootLs.output).toContain("root.md"); - expect(rootLs.output).toContain("summaries/"); - - const findNoMatches = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-14", - tool_input: { command: "find ~/.deeplake/memory/summaries -name '*.md'" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - findVirtualPathsFn: vi.fn(async () => []) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(findNoMatches.output).toBe("(no matches)"); - - const findRoot = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-14b", - tool_input: { command: "find ~/.deeplake/memory -name '*.md'" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - findVirtualPathsFn: vi.fn(async () => ["/summaries/a.md", "/notes.md"]) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(findRoot.output).toContain("/summaries/a.md"); - expect(findRoot.output).toContain("/notes.md"); - - const grepFallback = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-15", - tool_input: { command: "grep needle ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - handleGrepDirectFn: vi.fn(async () => null) as any, - runVirtualShellFn: vi.fn(() => "shell fallback") as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(grepFallback.output).toBe("shell fallback"); - - const errorFallback = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-16", - tool_input: { command: "cat ~/.deeplake/memory/index.md" }, - cwd: "/repo", 
- hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - executeCompiledBashCommandFn: vi.fn(async () => { throw new Error("boom"); }) as any, - runVirtualShellFn: vi.fn(() => "fallback after error") as any, - }); - expect(errorFallback.output).toBe("fallback after error"); - }); - - it("covers default head/tail forms, synthetic index rows, and long ls formatting", async () => { - const headDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-17", - tool_input: { command: "head ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: vi.fn(async () => "a\nb\nc") as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(headDecision.output).toBe("a\nb\nc"); - - const tailDecision = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-18", - tool_input: { command: "tail ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - readCachedIndexContentFn: vi.fn(() => null) as any, - writeCachedIndexContentFn: vi.fn() as any, - readVirtualPathContentFn: vi.fn(async () => "a\nb\nc") as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(tailDecision.output).toBe("a\nb\nc"); - - const api = { - query: vi.fn(async () => [{ path: "/summaries/alice/s1.md" }]), - }; - const syntheticIndex = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-19", - tool_input: { command: "cat ~/.deeplake/memory/index.md" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - createApi: vi.fn(() => api as any), - readVirtualPathContentFn: vi.fn(async () => null) as 
any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(syntheticIndex.output).toContain("# Memory Index"); - - const longLs = await processCodexPreToolUse({ - session_id: "s1", - tool_name: "Bash", - tool_use_id: "tu-20", - tool_input: { command: "ls -l ~/.deeplake/memory/summaries" }, - cwd: "/repo", - hook_event_name: "PreToolUse", - model: "gpt-5.2", - }, { - config: baseConfig, - listVirtualPathRowsFn: vi.fn(async () => [ - { path: "/summaries/alice/file.md" }, - { path: "/summaries/alice/another.md", size_bytes: 3 }, - { path: "/summaries/team/nested/file.md", size_bytes: 5 }, - ]) as any, - executeCompiledBashCommandFn: vi.fn(async () => null) as any, - }); - expect(longLs.output).toContain("alice/"); - expect(longLs.output).toContain("team/"); - expect(longLs.output).toContain("drwxr-xr-x"); - }); -}); - -describe("codex session start source", () => { - it("builds logged-in and logged-out context", () => { - const loggedIn = buildCodexSessionStartContext({ - creds: baseCreds, - currentVersion: "0.6.0", - authCommand: "/tmp/auth-login.js", - }); - const loggedOut = buildCodexSessionStartContext({ - creds: null, - currentVersion: "0.6.0", - authCommand: "/tmp/auth-login.js", - }); - - expect(loggedIn).toContain("Logged in to Deeplake"); - expect(loggedIn).toContain("Hivemind v0.6.0"); - expect(loggedIn).toContain("resolve it against that session's own date/date_time metadata"); - expect(loggedIn).toContain("convert the final answer into an absolute month/date/year"); - expect(loggedIn).toContain("answer with the smallest exact phrase supported by memory"); - expect(loggedIn).toContain('Do NOT answer "not found"'); - expect(loggedOut).toContain('Run: node "/tmp/auth-login.js" login'); - }); - - it("skips in wiki-worker mode and spawns async setup when authenticated", async () => { - expect(await runCodexSessionStartHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - wikiWorker: 
true, - })).toBeNull(); - - const write = vi.fn(); - const end = vi.fn(); - const unref = vi.fn(); - const spawnFn = vi.fn(() => ({ - stdin: { write, end }, - unref, - }) as any); - const result = await runCodexSessionStartHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: baseCreds, - currentVersion: "0.6.0", - spawnFn: spawnFn as any, - setupScript: "/tmp/session-start-setup.js", - authCommand: "/tmp/auth-login.js", - }); - - expect(result).toContain("Logged in to Deeplake"); - expect(spawnFn).toHaveBeenCalledTimes(1); - expect(write).toHaveBeenCalled(); - expect(end).toHaveBeenCalled(); - expect(unref).toHaveBeenCalled(); - }); - - it("returns logged-out context without spawning setup when unauthenticated", async () => { - const spawnFn = vi.fn(); - const result = await runCodexSessionStartHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: null, - spawnFn: spawnFn as any, - currentVersion: null, - authCommand: "/tmp/auth-login.js", - }); - - expect(result).toContain("Not logged in to Deeplake"); - expect(spawnFn).not.toHaveBeenCalled(); - }); - - it("falls back to org id and default workspace when names are missing", () => { - const context = buildCodexSessionStartContext({ - creds: { ...baseCreds, orgName: undefined, workspaceId: undefined } as any, - currentVersion: null, - authCommand: "/tmp/auth-login.js", - }); - expect(context).toContain("org-1"); - expect(context).toContain("workspace: default"); - expect(context).not.toContain("Hivemind v"); - }); -}); - -describe("codex session start setup source", () => { - it("creates placeholders only when summaries do not already exist", async () => { - const query = vi.fn(async () => []); - const api = { query } as any; - await createPlaceholder(api, "memory", "s1", "/repo", "alice", "Acme", "default"); - expect(query).toHaveBeenCalledTimes(2); - 
expect(String(query.mock.calls[1]?.[0])).toContain('INSERT INTO "memory"'); - - query.mockReset(); - query.mockResolvedValueOnce([{ path: "/summaries/alice/s1.md" }]); - await createPlaceholder(api, "memory", "s1", "/repo", "alice", "Acme", "default"); - expect(query).toHaveBeenCalledTimes(1); - }); - - it("handles no credentials, disabled session writes, and update notices", async () => { - expect(await runCodexSessionStartSetup({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: null, - })).toEqual({ status: "no_credentials" }); - - const stderr = vi.spyOn(process.stderr, "write").mockImplementation(() => true as any); - const placeholder = vi.fn(async () => undefined); - await runCodexSessionStartSetup({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: { ...baseCreds, autoupdate: false }, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable: vi.fn(async () => undefined), - query: vi.fn(async () => []), - }) as any), - isSessionWriteDisabledFn: vi.fn(() => true) as any, - createPlaceholderFn: placeholder as any, - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => "0.7.0") as any, - }); - expect(placeholder).toHaveBeenCalledTimes(1); - expect(stderr).toHaveBeenCalledWith(expect.stringContaining("update available")); - }); - - it("skips in wiki-worker mode and logs setup/version failures", async () => { - expect(await runCodexSessionStartSetup({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - wikiWorker: true, - })).toEqual({ status: "skipped" }); - - const logFn = vi.fn(); - const wikiLogFn = vi.fn(); - await runCodexSessionStartSetup({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: baseCreds, - config: baseConfig, - createApi: 
vi.fn(() => ({ - ensureTable: vi.fn(async () => { throw new Error("boom"); }), - }) as any), - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => { throw new Error("offline"); }) as any, - logFn, - wikiLogFn, - }); - - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("setup failed: boom")); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("version check failed: offline")); - expect(wikiLogFn).toHaveBeenCalledWith(expect.stringContaining("failed for s1: boom")); - }); - - it("handles capture-disabled and successful autoupdate flows", async () => { - const placeholder = vi.fn(); - const stderr = vi.spyOn(process.stderr, "write").mockImplementation(() => true as any); - const execSyncFn = vi.fn(); - await runCodexSessionStartSetup({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: baseCreds, - config: baseConfig, - captureEnabled: false, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - }) as any), - createPlaceholderFn: placeholder as any, - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => "0.7.0") as any, - execSyncFn: execSyncFn as any, - }); - expect(placeholder).not.toHaveBeenCalled(); - expect(execSyncFn).toHaveBeenCalledTimes(1); - expect(stderr).toHaveBeenCalledWith(expect.stringContaining("auto-updated")); - }); - - it("handles non-auth setup errors and skips setup when session metadata is absent", async () => { - const wikiLogFn = vi.fn(); - const createPlaceholderFn = vi.fn(); - await runCodexSessionStartSetup({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: baseCreds, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable: vi.fn(async () => { throw new Error("boom"); }), - }) as any), - isSessionWriteDisabledFn: vi.fn(() => false) as any, 
- isSessionWriteAuthErrorFn: vi.fn(() => false) as any, - tryAcquireSessionDrainLockFn: vi.fn(() => (() => undefined)) as any, - createPlaceholderFn: createPlaceholderFn as any, - getInstalledVersionFn: vi.fn(() => null) as any, - wikiLogFn, - }); - expect(createPlaceholderFn).not.toHaveBeenCalled(); - expect(wikiLogFn).toHaveBeenCalledWith(expect.stringContaining("failed for s1: boom")); - - await expect(runCodexSessionStartSetup({ - session_id: "", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: baseCreds, - config: baseConfig, - getInstalledVersionFn: vi.fn(() => null) as any, - })).resolves.toEqual({ status: "complete" }); - }); - - it("backfills missing usernames, handles auth-disabled session writes, and treats missing cwd as unknown", async () => { - const save = vi.fn(); - const placeholder = vi.fn(async () => undefined); - await runCodexSessionStartSetup({ - session_id: "s1", - cwd: undefined as any, - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: { ...baseCreds, userName: undefined }, - saveCredentialsFn: save as any, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable: vi.fn(async () => { throw new Error("403 Forbidden"); }), - }) as any), - isSessionWriteDisabledFn: vi.fn(() => false) as any, - isSessionWriteAuthErrorFn: vi.fn(() => true) as any, - markSessionWriteDisabledFn: vi.fn() as any, - tryAcquireSessionDrainLockFn: vi.fn(() => (() => undefined)) as any, - createPlaceholderFn: placeholder as any, - getInstalledVersionFn: vi.fn(() => "0.6.0") as any, - getLatestVersionCachedFn: vi.fn(async () => "0.6.0") as any, - }); - expect(save).toHaveBeenCalledTimes(1); - expect(placeholder).toHaveBeenCalledWith(expect.anything(), "memory", "s1", "", "alice", "Acme", "default"); - - const query = vi.fn(async () => []); - await createPlaceholder({ query } as any, "memory", "s2", "", "alice", "Acme", "default"); - 
expect(String(query.mock.calls[1]?.[0])).toContain("'unknown'"); - }); - - it("skips duplicate queue drains while another codex session-start setup is already handling sessions", async () => { - const logFn = vi.fn(); - const createPlaceholderFn = vi.fn(async () => undefined); - const ensureSessionsTable = vi.fn(async () => undefined); - const drainSessionQueuesFn = vi.fn(async () => ({ - queuedSessions: 1, - flushedSessions: 1, - rows: 1, - batches: 1, - })); - - await runCodexSessionStartSetup({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "SessionStart", - model: "gpt-5.2", - }, { - creds: baseCreds, - config: baseConfig, - createApi: vi.fn(() => ({ - ensureTable: vi.fn(async () => undefined), - ensureSessionsTable, - query: vi.fn(async () => []), - }) as any), - isSessionWriteDisabledFn: vi.fn(() => false) as any, - tryAcquireSessionDrainLockFn: vi.fn(() => null) as any, - drainSessionQueuesFn: drainSessionQueuesFn as any, - createPlaceholderFn: createPlaceholderFn as any, - getInstalledVersionFn: vi.fn(() => null) as any, - logFn, - }); - - expect(ensureSessionsTable).not.toHaveBeenCalled(); - expect(drainSessionQueuesFn).not.toHaveBeenCalled(); - expect(createPlaceholderFn).toHaveBeenCalledTimes(1); - expect(logFn).toHaveBeenCalledWith(expect.stringContaining("sessions drain already in progress")); - }); -}); - -describe("codex stop source", () => { - it("extracts assistant messages from string and block transcripts", () => { - expect(extractLastAssistantMessage([ - '{"role":"assistant","content":"done"}', - ].join("\n"))).toBe("done"); - - expect(extractLastAssistantMessage([ - '{"payload":{"role":"assistant","content":[{"type":"output_text","text":"first"},{"type":"text","text":"second"}]}}', - ].join("\n"))).toBe("first\nsecond"); - - expect(extractLastAssistantMessage("not json")).toBe(""); - }); - - it("builds stop entries for assistant messages and assistant stops", () => { - const message = buildCodexStopEntry({ - session_id: "s1", - 
transcript_path: "/tmp/t.jsonl", - cwd: "/repo", - hook_event_name: "Stop", - model: "gpt-5.2", - }, "2026-01-01T00:00:00.000Z", "done"); - const stop = buildCodexStopEntry({ - session_id: "s1", - transcript_path: null, - cwd: "/repo", - hook_event_name: "Stop", - model: "gpt-5.2", - }, "2026-01-01T00:00:01.000Z", ""); - - expect(message.type).toBe("assistant_message"); - expect(stop.type).toBe("assistant_stop"); - }); - - it("skips, returns no_config, and flushes plus spawns summaries", async () => { - expect(await runCodexStopHook({ - session_id: "", - cwd: "/repo", - hook_event_name: "Stop", - model: "gpt-5.2", - }, { - config: baseConfig, - })).toEqual({ status: "skipped" }); - - expect(await runCodexStopHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "Stop", - model: "gpt-5.2", - }, { - config: null, - })).toEqual({ status: "no_config" }); - - const flush = vi.fn(async () => ({ status: "flushed", rows: 2, batches: 1 })); - const spawn = vi.fn(); - const result = await runCodexStopHook({ - session_id: "s1", - transcript_path: "/tmp/t.jsonl", - cwd: "/repo", - hook_event_name: "Stop", - model: "gpt-5.2", - }, { - config: baseConfig, - transcriptExists: vi.fn(() => true) as any, - readTranscript: vi.fn(() => '{"role":"assistant","content":"done"}') as any, - appendQueuedSessionRowFn: vi.fn() as any, - flushSessionQueueFn: flush as any, - spawnCodexWikiWorkerFn: spawn as any, - wikiLogFn: vi.fn() as any, - bundleDir: "/tmp/bundle", - }); - - expect(result).toMatchObject({ status: "complete", flushStatus: "flushed" }); - expect(flush).toHaveBeenCalledTimes(1); - expect(spawn).toHaveBeenCalledWith({ - config: baseConfig, - sessionId: "s1", - cwd: "/repo", - bundleDir: "/tmp/bundle", - reason: "Stop", - }); - - const noCapture = await runCodexStopHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "Stop", - model: "gpt-5.2", - }, { - config: baseConfig, - captureEnabled: false, - }); - expect(noCapture).toEqual({ status: "complete", entry: 
undefined }); - }); - - it("continues when transcript reads fail and when wiki-worker mode is active", async () => { - expect(await runCodexStopHook({ - session_id: "s1", - cwd: "/repo", - hook_event_name: "Stop", - model: "gpt-5.2", - }, { - wikiWorker: true, - config: baseConfig, - })).toEqual({ status: "skipped" }); - - const flush = vi.fn(async () => ({ status: "flushed", rows: 1, batches: 1 })); - const result = await runCodexStopHook({ - session_id: "s1", - transcript_path: "/tmp/t.jsonl", - cwd: "/repo", - hook_event_name: "Stop", - model: "gpt-5.2", - }, { - config: baseConfig, - transcriptExists: vi.fn(() => true) as any, - readTranscript: vi.fn(() => { throw new Error("boom"); }) as any, - appendQueuedSessionRowFn: vi.fn() as any, - flushSessionQueueFn: flush as any, - spawnCodexWikiWorkerFn: vi.fn() as any, - wikiLogFn: vi.fn() as any, - bundleDir: "/tmp/bundle", - }); - - expect(result.flushStatus).toBe("flushed"); - expect(flush).toHaveBeenCalledTimes(1); - }); - - it("returns empty when assistant blocks have no text and keeps going after capture failures", async () => { - expect(extractLastAssistantMessage([ - "{\"role\":\"assistant\",\"content\":[{\"type\":\"image\",\"url\":\"x\"}]}", - "{\"role\":\"user\",\"content\":\"hi\"}", - ].join("\n"))).toBe(""); - - const spawn = vi.fn(); - const logFn = vi.fn(); - const result = await runCodexStopHook({ - session_id: "s1", - transcript_path: "/tmp/missing.jsonl", - cwd: undefined as any, - hook_event_name: "Stop", - model: "gpt-5.2", - }, { - config: baseConfig, - transcriptExists: vi.fn(() => false) as any, - appendQueuedSessionRowFn: vi.fn() as any, - flushSessionQueueFn: vi.fn(async () => { throw new Error("flush boom"); }) as any, - spawnCodexWikiWorkerFn: spawn as any, - wikiLogFn: vi.fn() as any, - logFn, - bundleDir: "/tmp/bundle", - }); - - expect(result).toMatchObject({ - status: "complete", - entry: expect.objectContaining({ type: "assistant_stop" }), - }); - 
expect(logFn).toHaveBeenCalledWith(expect.stringContaining("capture failed: flush boom")); - expect(spawn).toHaveBeenCalledWith({ - config: baseConfig, - sessionId: "s1", - cwd: "", - bundleDir: "/tmp/bundle", - reason: "Stop", - }); - }); -}); From 202ff2fc50acfd863c04522a0c13f8c2e124a964 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Tue, 21 Apr 2026 00:05:38 +0000 Subject: [PATCH 36/39] =?UTF-8?q?fix(review):=20address=20PR=20#63=20bot?= =?UTF-8?q?=20review=20=E2=80=94=20path=20traversal,=20dead=20code,=20line?= =?UTF-8?q?=20count?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three issues flagged by the automated review on PR #63: 1. `writeReadCacheFile` (src/hooks/pre-tool-use.ts) had no containment guard: `path.join(cacheRoot, session, "read", rel)` resolves `..` segments in `rel`, so a DB-controlled `virtualPath` could escape the per-session cache dir. Added a check that `absPath` stays under `expectedRoot = join(cacheRoot, session, "read")` and throws `"writeReadCacheFile: path escapes cache root: "` otherwise. Uses `path.sep` so the boundary check is correct on any platform. 2. The inline `/index.md` fallback in `processPreToolUse` (pre-tool- use.ts:334-347) was unreachable after fix #1 landed, and if somehow reached would regenerate the old broken single-table index (queries only `memory`, uses the header "${n} sessions:", omits `## Sessions`). Removed; the dual-table builder in `virtual-table-query.ts` now owns index generation exclusively. 3. `src/utils/output-cap.ts` had a dead `cut += lineBytes` accumulator (would trigger `noUnusedLocals` under strict TS config) and a trailing-newline off-by-one: `output.split("\n")` on `"a\nb\n"` returns `["a", "b", ""]`, so `totalLines` over-counted by 1 whenever the input ended with a newline — which grep and cat both do. The footer reported one extra "elided line" that was the empty terminator, not a real content line. 
Dropped the dead accumulator and adjusted totalLines to subtract the trailing empty entry. Test coverage: - `claude-code/tests/pre-tool-use-baseline-cloud.test.ts` — 4 new cases on `writeReadCacheFile`: happy path, `../../../etc/passwd` traversal refused (and no file lands anywhere under cacheRoot), absolute-root escape refused, and a path that normalizes back inside the cache (`/sessions/foo/../bar.json`) is still accepted. Plus one integration test that pins the removal of the inline /index.md fallback: `processPreToolUse` must materialize the dual-table builder's content and must NOT issue its own `FROM "memory" WHERE path LIKE '/summaries/%'` SELECT. - `claude-code/tests/output-cap.test.ts` — 2 new cases on the line counting: with a trailing newline the kept-lines + elided-lines sum matches the original line count exactly (no off-by-one), and without a trailing newline the count is still exact. Full suite: 844 / 844 tests passing. --- claude-code/bundle/pre-tool-use.js | 26 ++-- claude-code/tests/output-cap.test.ts | 40 ++++++ .../tests/pre-tool-use-baseline-cloud.test.ts | 120 +++++++++++++++++- codex/bundle/pre-tool-use.js | 6 +- src/hooks/pre-tool-use.ts | 29 ++--- src/utils/output-cap.ts | 9 +- 6 files changed, 187 insertions(+), 43 deletions(-) diff --git a/claude-code/bundle/pre-tool-use.js b/claude-code/bundle/pre-tool-use.js index 84b5152..a231ff5 100755 --- a/claude-code/bundle/pre-tool-use.js +++ b/claude-code/bundle/pre-tool-use.js @@ -3,7 +3,7 @@ // dist/src/hooks/pre-tool-use.js import { existsSync as existsSync3, mkdirSync as mkdirSync3, writeFileSync as writeFileSync3 } from "node:fs"; import { homedir as homedir5 } from "node:os"; -import { join as join6, dirname } from "node:path"; +import { join as join6, dirname, sep } from "node:path"; import { fileURLToPath as fileURLToPath2 } from "node:url"; // dist/src/utils/stdin.js @@ -807,7 +807,6 @@ function capOutputForClaude(output, options = {}) { const kind = options.kind ?? 
"output"; const footerReserve = 220; const budget = Math.max(1, maxBytes - footerReserve); - let cut = 0; let running = 0; const lines = output.split("\n"); const keptLines = []; @@ -817,7 +816,6 @@ function capOutputForClaude(output, options = {}) { break; keptLines.push(line); running += lineBytes; - cut += lineBytes; } if (keptLines.length === 0) { const slice = Buffer.from(output, "utf8").slice(0, budget).toString("utf8"); @@ -825,8 +823,8 @@ function capOutputForClaude(output, options = {}) { ... [${kind} truncated: ${(byteLen(output) / 1024).toFixed(1)} KB total; refine with '| head -N' or a tighter pattern]`; return slice + footer2; } - const totalLines = lines.length; - const elidedLines = totalLines - keptLines.length; + const totalLines = lines.length - (lines[lines.length - 1] === "" ? 1 : 0); + const elidedLines = Math.max(0, totalLines - keptLines.length); const elidedBytes = byteLen(output) - byteLen(keptLines.join("\n")); const footer = ` ... [${kind} truncated: ${elidedLines} more lines (${(elidedBytes / 1024).toFixed(1)} KB) elided \u2014 refine with '| head -N' or a tighter pattern]`; @@ -1851,7 +1849,11 @@ function writeReadCacheFile(sessionId, virtualPath, content, deps = {}) { const { cacheRoot = READ_CACHE_ROOT } = deps; const safeSessionId = sessionId.replace(/[^a-zA-Z0-9._-]/g, "_") || "unknown"; const rel = virtualPath.replace(/^\/+/, "") || "content"; - const absPath = join6(cacheRoot, safeSessionId, "read", rel); + const expectedRoot = join6(cacheRoot, safeSessionId, "read"); + const absPath = join6(expectedRoot, rel); + if (absPath !== expectedRoot && !absPath.startsWith(expectedRoot + sep)) { + throw new Error(`writeReadCacheFile: path escapes cache root: ${absPath}`); + } mkdirSync3(dirname(absPath), { recursive: true }); writeFileSync3(absPath, content, "utf-8"); return absPath; @@ -2052,18 +2054,6 @@ async function processPreToolUse(input, deps = {}) { if (content === null) { content = await readVirtualPathContentFn(api, table, 
sessionsTable, virtualPath); } - if (content === null && virtualPath === "/index.md") { - const idxRows = await api.query(`SELECT path, project, description, creation_date FROM "${table}" WHERE path LIKE '/summaries/%' ORDER BY creation_date DESC`); - const lines = ["# Memory Index", "", `${idxRows.length} sessions:`, ""]; - for (const r of idxRows) { - const p = r["path"]; - const proj = r["project"] || ""; - const desc = (r["description"] || "").slice(0, 120); - const date = (r["creation_date"] || "").slice(0, 10); - lines.push(`- [${p}](${p}) ${date} ${proj ? `[${proj}]` : ""} ${desc}`); - } - content = lines.join("\n"); - } if (content !== null) { if (virtualPath === "/index.md") { writeCachedIndexContentFn(input.session_id, content); diff --git a/claude-code/tests/output-cap.test.ts b/claude-code/tests/output-cap.test.ts index 5c59049..cebc217 100644 --- a/claude-code/tests/output-cap.test.ts +++ b/claude-code/tests/output-cap.test.ts @@ -91,4 +91,44 @@ describe("capOutputForClaude", () => { expect(Buffer.byteLength(out, "utf8")).toBeGreaterThan(4 * 1024); expect(Buffer.byteLength(out, "utf8")).toBeLessThanOrEqual(CLAUDE_OUTPUT_CAP_BYTES); }); + + // ── Regression: trailing newline shouldn't inflate the elided-line count ── + // + // `output.split("\n")` on "a\nb\n" returns ["a", "b", ""]. Treating the + // trailing empty entry as a "real" line made the footer's "N more lines + // elided" number off by one whenever the original input ended with a + // newline (which grep and cat both do in practice). + + it("does not count a trailing newline as an extra line when reporting elided lines", () => { + const line = "x".repeat(100); + // 500 real content lines followed by a terminating "\n". Input ends with \n. 
+ const input = Array.from({ length: 500 }, () => line).join("\n") + "\n"; + const out = capOutputForClaude(input, { kind: "grep" }); + + const footerMatch = out.match(/(\d+) more lines/); + expect(footerMatch).not.toBeNull(); + const elided = Number(footerMatch![1]); + + // Parse the kept-body to count surviving real lines. Split produces a + // trailing "" entry when the kept body itself ends with a newline; drop + // it the same way the production code does. + const body = out.split("\n... [")[0]; + const bodySplit = body.split("\n"); + const keptLines = bodySplit[bodySplit.length - 1] === "" ? bodySplit.length - 1 : bodySplit.length; + + // The 500 real lines must be accounted for exactly once — no double + // counting of the trailing newline. + expect(keptLines + elided).toBe(500); + }); + + it("the elided count matches exactly when there is no trailing newline", () => { + const line = "x".repeat(100); + const input = Array.from({ length: 500 }, () => line).join("\n"); // no trailing \n + const out = capOutputForClaude(input, { kind: "grep" }); + + const bodyLines = out.split("\n... 
[")[0].split("\n").length; + const footerMatch = out.match(/(\d+) more lines/); + expect(footerMatch).not.toBeNull(); + expect(bodyLines + Number(footerMatch![1])).toBe(500); + }); }); diff --git a/claude-code/tests/pre-tool-use-baseline-cloud.test.ts b/claude-code/tests/pre-tool-use-baseline-cloud.test.ts index 40e4ab4..f07831a 100644 --- a/claude-code/tests/pre-tool-use-baseline-cloud.test.ts +++ b/claude-code/tests/pre-tool-use-baseline-cloud.test.ts @@ -16,7 +16,10 @@ */ import { describe, expect, it, vi } from "vitest"; -import { processPreToolUse } from "../../src/hooks/pre-tool-use.js"; +import { existsSync, mkdtempSync, readFileSync, rmSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { processPreToolUse, writeReadCacheFile } from "../../src/hooks/pre-tool-use.js"; import { buildVirtualIndexContent, readVirtualPathContents, @@ -290,4 +293,119 @@ describe("baseline_cloud 3-QA regression: sessions-only workspace", () => { expect(capturedReadFiles[0]?.content).toContain("Caroline"); expect(capturedReadFiles[0]?.content).toContain("8 May, 2023"); }); + + // ── writeReadCacheFile security guard ───────────────────────────────────── + // + // Claude Code's Read intercept materializes fetched content into + // ~/.deeplake/query-cache//read/. DB-derived + // virtualPaths are user-controlled (anyone with write access to the + // `sessions` / `memory` tables controls them), so `..` segments must not + // be allowed to escape the per-session cache dir. The PR #63 bot review + // flagged this. 
+ + describe("writeReadCacheFile path-traversal guard", () => { + it("writes a well-formed virtualPath inside the per-session cache root", () => { + const cacheRoot = mkdtempSync(join(tmpdir(), "writeReadCache-ok-")); + try { + const abs = writeReadCacheFile("sess-1", "/sessions/conv_0_session_1.json", "hello", { cacheRoot }); + expect(abs).toBe(join(cacheRoot, "sess-1", "read", "sessions", "conv_0_session_1.json")); + expect(existsSync(abs)).toBe(true); + expect(readFileSync(abs, "utf-8")).toBe("hello"); + } finally { + rmSync(cacheRoot, { recursive: true, force: true }); + } + }); + + it("refuses a virtualPath that escapes the cache root via ../ segments", () => { + const cacheRoot = mkdtempSync(join(tmpdir(), "writeReadCache-trav-")); + try { + expect(() => + writeReadCacheFile("sess-2", "/sessions/../../../etc/passwd", "pwned", { cacheRoot }) + ).toThrow(/path escapes cache root/); + // Guard must fire BEFORE any write lands anywhere under cacheRoot. + expect(existsSync(join(cacheRoot, "sess-2", "read", "sessions"))).toBe(false); + expect(existsSync(join(cacheRoot, "etc"))).toBe(false); + } finally { + rmSync(cacheRoot, { recursive: true, force: true }); + } + }); + + it("refuses traversal that lands outside the cache root entirely", () => { + const cacheRoot = mkdtempSync(join(tmpdir(), "writeReadCache-out-")); + try { + // Resolves to something like /tmp/writeReadCache-out-XXX/sess-3/read/../../../../../../etc/shadow + // → /etc/shadow — fully outside cacheRoot. + expect(() => + writeReadCacheFile("sess-3", "/../../../../../../etc/shadow", "x", { cacheRoot }) + ).toThrow(/path escapes cache root/); + } finally { + rmSync(cacheRoot, { recursive: true, force: true }); + } + }); + + it("accepts a path that normalizes back inside the cache root", () => { + const cacheRoot = mkdtempSync(join(tmpdir(), "writeReadCache-norm-")); + try { + // `/sessions/foo/../bar.json` → `/sessions/bar.json`, still inside. 
+ const abs = writeReadCacheFile("sess-4", "/sessions/foo/../bar.json", "ok", { cacheRoot }); + expect(abs).toBe(join(cacheRoot, "sess-4", "read", "sessions", "bar.json")); + expect(readFileSync(abs, "utf-8")).toBe("ok"); + } finally { + rmSync(cacheRoot, { recursive: true, force: true }); + } + }); + }); + + // ── /index.md fallback lives in virtual-table-query.ts only ─────────────── + // + // An earlier draft of fix #1 duplicated the synthesized-index builder + // inside pre-tool-use.ts. The bot review flagged that duplicate as + // unreachable + using the old single-table SQL ("N sessions:" header, + // missing `## Sessions`). The duplicate has since been removed; this + // test locks in that removal — `processPreToolUse` must use the dual- + // table builder and never synthesize its own broken fallback. + + it("index.md intercept never falls back to the single-table inline builder", async () => { + // readVirtualPathContentFn returns non-null for /index.md (fix #1 + // guarantee), so the old inline fallback is now unreachable. If + // somebody re-introduces it, this test fails because the bad string + // "${n} sessions:" would appear in the output instead of the dual- + // table "${total} entries (${s} summaries, ${n} sessions):" header. 
+ const api = { query: vi.fn(async () => []) } as any; + const readVirtualPathContentFn = vi.fn(async () => "# Memory Index\n\n272 entries (0 summaries, 272 sessions):\n"); + let materialized: string | undefined; + + const decision = await processPreToolUse( + { + session_id: "s-index-fallback", + tool_name: "Read", + tool_input: { file_path: "~/.deeplake/memory/index.md" }, + tool_use_id: "tu-fallback", + }, + { + config: BASE_CONFIG, + createApi: vi.fn(() => api), + readVirtualPathContentFn: readVirtualPathContentFn as any, + readCachedIndexContentFn: () => null, + writeCachedIndexContentFn: () => undefined, + writeReadCacheFileFn: ((_sid: string, _vp: string, content: string) => { + materialized = content; + return "/tmp/fake-index-path"; + }) as any, + }, + ); + + expect(decision).not.toBeNull(); + expect(materialized).toBeDefined(); + // The dual-table builder's content was materialized, not the + // single-table "N sessions:" fallback. + expect(materialized).toContain("272 entries (0 summaries, 272 sessions):"); + expect(materialized).not.toMatch(/\n\d+ sessions:\n/); + // Production code must not issue its own fallback SELECT against + // memory for /index.md — it delegates entirely to readVirtualPath. + const summariesOnlyFallback = api.query.mock.calls.find((call: any[]) => + String(call[0] || "").includes(`FROM "memory" WHERE path LIKE '/summaries/%'`) + ); + expect(summariesOnlyFallback).toBeUndefined(); + }); }); diff --git a/codex/bundle/pre-tool-use.js b/codex/bundle/pre-tool-use.js index 45ebaf5..997faff 100755 --- a/codex/bundle/pre-tool-use.js +++ b/codex/bundle/pre-tool-use.js @@ -793,7 +793,6 @@ function capOutputForClaude(output, options = {}) { const kind = options.kind ?? 
"output"; const footerReserve = 220; const budget = Math.max(1, maxBytes - footerReserve); - let cut = 0; let running = 0; const lines = output.split("\n"); const keptLines = []; @@ -803,7 +802,6 @@ function capOutputForClaude(output, options = {}) { break; keptLines.push(line); running += lineBytes; - cut += lineBytes; } if (keptLines.length === 0) { const slice = Buffer.from(output, "utf8").slice(0, budget).toString("utf8"); @@ -811,8 +809,8 @@ function capOutputForClaude(output, options = {}) { ... [${kind} truncated: ${(byteLen(output) / 1024).toFixed(1)} KB total; refine with '| head -N' or a tighter pattern]`; return slice + footer2; } - const totalLines = lines.length; - const elidedLines = totalLines - keptLines.length; + const totalLines = lines.length - (lines[lines.length - 1] === "" ? 1 : 0); + const elidedLines = Math.max(0, totalLines - keptLines.length); const elidedBytes = byteLen(output) - byteLen(keptLines.join("\n")); const footer = ` ... [${kind} truncated: ${elidedLines} more lines (${(elidedBytes / 1024).toFixed(1)} KB) elided \u2014 refine with '| head -N' or a tighter pattern]`; diff --git a/src/hooks/pre-tool-use.ts b/src/hooks/pre-tool-use.ts index f55fbc7..34c45db 100644 --- a/src/hooks/pre-tool-use.ts +++ b/src/hooks/pre-tool-use.ts @@ -2,7 +2,7 @@ import { existsSync, mkdirSync, writeFileSync } from "node:fs"; import { homedir } from "node:os"; -import { join, dirname } from "node:path"; +import { join, dirname, sep } from "node:path"; import { fileURLToPath } from "node:url"; import { readStdin } from "../utils/stdin.js"; import { loadConfig } from "../config.js"; @@ -73,7 +73,14 @@ export function writeReadCacheFile( const { cacheRoot = READ_CACHE_ROOT } = deps; const safeSessionId = sessionId.replace(/[^a-zA-Z0-9._-]/g, "_") || "unknown"; const rel = virtualPath.replace(/^\/+/, "") || "content"; - const absPath = join(cacheRoot, safeSessionId, "read", rel); + const expectedRoot = join(cacheRoot, safeSessionId, "read"); + const 
absPath = join(expectedRoot, rel); + // Containment guard: if the DB-derived virtualPath contains `..` segments, + // `join` resolves them and absPath can escape the per-session cache dir. + // Refuse the write rather than silently writing outside the sandbox. + if (absPath !== expectedRoot && !absPath.startsWith(expectedRoot + sep)) { + throw new Error(`writeReadCacheFile: path escapes cache root: ${absPath}`); + } mkdirSync(dirname(absPath), { recursive: true }); writeFileSync(absPath, content, "utf-8"); return absPath; @@ -329,22 +336,12 @@ export async function processPreToolUse(input: PreToolUseInput, deps: ClaudePreT : null; if (content === null) { + // `/index.md` goes through the dual-table builder inside + // `readVirtualPathContents` (fix #1). Other paths fall back to the + // same helper which returns null when neither table has a row, at + // which point we let the shell bundle handle the miss below. content = await readVirtualPathContentFn(api, table, sessionsTable, virtualPath); } - if (content === null && virtualPath === "/index.md") { - const idxRows = await api.query( - `SELECT path, project, description, creation_date FROM "${table}" WHERE path LIKE '/summaries/%' ORDER BY creation_date DESC` - ); - const lines = ["# Memory Index", "", `${idxRows.length} sessions:`, ""]; - for (const r of idxRows) { - const p = r["path"] as string; - const proj = r["project"] as string || ""; - const desc = (r["description"] as string || "").slice(0, 120); - const date = (r["creation_date"] as string || "").slice(0, 10); - lines.push(`- [${p}](${p}) ${date} ${proj ? 
`[${proj}]` : ""} ${desc}`); - } - content = lines.join("\n"); - } if (content !== null) { if (virtualPath === "/index.md") { writeCachedIndexContentFn(input.session_id, content); diff --git a/src/utils/output-cap.ts b/src/utils/output-cap.ts index c8db8a4..1b620a7 100644 --- a/src/utils/output-cap.ts +++ b/src/utils/output-cap.ts @@ -47,7 +47,6 @@ export function capOutputForClaude(output: string, options: CapOutputOptions = { // Find the last newline before the byte budget. Walk forward building // the slice so the byte boundary stays valid even for multibyte UTF-8. - let cut = 0; let running = 0; const lines = output.split("\n"); const keptLines: string[] = []; @@ -56,7 +55,6 @@ export function capOutputForClaude(output: string, options: CapOutputOptions = { if (running + lineBytes > budget) break; keptLines.push(line); running += lineBytes; - cut += lineBytes; } if (keptLines.length === 0) { @@ -66,8 +64,11 @@ export function capOutputForClaude(output: string, options: CapOutputOptions = { return slice + footer; } - const totalLines = lines.length; - const elidedLines = totalLines - keptLines.length; + // `split("\n")` on `"a\nb\n"` produces `["a", "b", ""]` — the trailing + // empty entry is a newline terminator, not a real extra line. Counting + // it would over-report the elided-line tally in the footer. + const totalLines = lines.length - (lines[lines.length - 1] === "" ? 1 : 0); + const elidedLines = Math.max(0, totalLines - keptLines.length); const elidedBytes = byteLen(output) - byteLen(keptLines.join("\n")); const footer = `\n... 
[${kind} truncated: ${elidedLines} more lines (${(elidedBytes / 1024).toFixed(1)} KB) elided — refine with '| head -N' or a tighter pattern]`; return keptLines.join("\n") + footer; From e6dde99543667ea31157a48a700c877bdca7255b Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Tue, 21 Apr 2026 00:09:59 +0000 Subject: [PATCH 37/39] ci: split jscpd into its own job so the PR checks table has a dedicated row The jscpd duplication check used to run as a step inside the "Typecheck and Test" job, so the PR checks table only showed a single aggregate row for both. Reviewers couldn't tell at a glance whether duplication passed without opening the combined log. Move jscpd into its own `duplication` job named "Duplication check". Small installation cost (extra `npm install`, runs in parallel with the test job) in exchange for clear attribution on the PR checks table. Artifact upload and the jscpd config stay the same. --- .github/workflows/ci.yml | 42 ++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 123a17d..c39022e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,17 +13,15 @@ permissions: pull-requests: write jobs: - test: - name: Typecheck and Test + duplication: + # Code-duplication regression guard. Pulled out of the `test` job so + # the PR checks table shows a dedicated pass/fail row — reviewers see + # at a glance whether the change introduced duplicated code without + # having to open the combined "Typecheck and Test" log. + name: Duplication check runs-on: ubuntu-latest - steps: - uses: actions/checkout@v4 - with: - # Full history so the "Build PR coverage comment" step can do - # `git diff origin/...HEAD` to detect touched src/ files. - # Default shallow checkout (depth=1) produces "no merge base". 
- fetch-depth: 0 - name: Setup Node.js uses: actions/setup-node@v4 @@ -33,10 +31,7 @@ jobs: - name: Install dependencies run: npm install - - name: Typecheck - run: npm run typecheck - - - name: Duplication check (jscpd) + - name: Run jscpd # Threshold 7% is the current baseline (see .jscpd.json). The job # fails if a future change pushes duplication above it, so the # number is a regression guard — reviewers can see the exact @@ -51,6 +46,29 @@ jobs: path: jscpd-report/ if-no-files-found: ignore + test: + name: Typecheck and Test + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + # Full history so the "Build PR coverage comment" step can do + # `git diff origin/...HEAD` to detect touched src/ files. + # Default shallow checkout (depth=1) produces "no merge base". + fetch-depth: 0 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Install dependencies + run: npm install + + - name: Typecheck + run: npm run typecheck + - name: Run tests with coverage # Per-file 80% thresholds for PR #60 files are declared in # vitest.config.ts under `coverage.thresholds`. Vitest exits non-zero From 332b4f73a6d4354fe17583d53d136b544558a1b4 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Tue, 21 Apr 2026 00:17:41 +0000 Subject: [PATCH 38/39] test: raise pre-tool-use.ts / memory-path-utils.ts coverage to 90%+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR #63 bot review flagged several source files as under-covered. Added a dedicated branch-coverage suite for the pre-tool-use hook and registered the two now-sufficient files in `vitest.config.ts` so their thresholds are enforced on every run. `claude-code/tests/pre-tool-use-branches.test.ts` — 46 test cases: - Pure helpers: buildAllowDecision, buildReadDecision, rewritePaths, touchesMemory, isSafe (positive + negative paths). 
- getShellCommand: Grep hit + miss, Read on file + directory, Bash safe + unsafe + non-memory, Glob hit + miss, unknown tool → null. - extractGrepParams: Grep output_mode=count, empty path → "/", Bash delegating to parseBashGrep, non-grep Bash → null, unknown tool → null. - processPreToolUse end-to-end: - returns null for non-memory Bash - returns `[RETRY REQUIRED]` guidance for unsupported commands - falls back to the shell bundle when no config is loaded - Glob + Bash `ls` + Bash `ls -la` long format - ls with both file-level (-rw-) and directory (drwx) entries; also empty-name rows skipped by the `if (!name) continue` guard - cat / head / tail / wc -l / cat | head pipeline - find / find | wc -l - Grep tool delegates to handleGrepDirect; null result falls through to the read/ls branch instead of short-circuiting - direct query throws → shell bundle fallback - Index cache short-circuit: three cases covering the inline readVirtualPathContentsWithCache callback that the bash compiler passes into executeCompiledBashCommand — cache hit, cache miss (writes fresh index), empty cachePaths edge case. Coverage after this suite (measured on pre-tool-use-branches + pre-tool-use-baseline-cloud): src/hooks/pre-tool-use.ts lines 98.9 branches 90.0 funcs 93.8 stmts 98.6 src/hooks/memory-path-utils.ts lines 100 branches 90.9 funcs 100 stmts 100 Both now registered under `coverage.thresholds` at 90 / 90 / 90 / 90 in `vitest.config.ts`, alongside the five existing PR-tracked files. Full suite: 890 / 890 passing (was 844 before this commit). 
--- .../tests/pre-tool-use-branches.test.ts | 633 ++++++++++++++++++ vitest.config.ts | 12 + 2 files changed, 645 insertions(+) create mode 100644 claude-code/tests/pre-tool-use-branches.test.ts diff --git a/claude-code/tests/pre-tool-use-branches.test.ts b/claude-code/tests/pre-tool-use-branches.test.ts new file mode 100644 index 0000000..4ad05cb --- /dev/null +++ b/claude-code/tests/pre-tool-use-branches.test.ts @@ -0,0 +1,633 @@ +/** + * Branch-coverage suite for `src/hooks/pre-tool-use.ts`. + * + * The PR already has an end-to-end regression suite in + * `pre-tool-use-baseline-cloud.test.ts`, but that file anchors to real + * LoCoMo QAs and only exercises the `/index.md` and `/sessions/*` Read + * paths plus one Bash `cat`. This file fills in the remaining branches + * that the hook supports — Glob, Grep, Bash ls/head/tail/wc/find, the + * unsafe-command guidance path, and the no-config fallback — so the + * whole file can stay above the 90% coverage bar. + */ + +import { describe, expect, it, vi } from "vitest"; +import { + buildAllowDecision, + buildReadDecision, + extractGrepParams, + getShellCommand, + isSafe, + processPreToolUse, + rewritePaths, + touchesMemory, +} from "../../src/hooks/pre-tool-use.js"; + +const BASE_CONFIG = { + token: "t", + apiUrl: "http://example", + orgId: "org", + orgName: "org", + userName: "u", + workspaceId: "default", + apiOrigin: "http://example", +}; + +function makeApi() { + return { query: vi.fn(async () => []) } as any; +} + +describe("pre-tool-use: pure helpers", () => { + it("buildAllowDecision returns a bash-shaped decision", () => { + expect(buildAllowDecision("echo hi", "d")).toEqual({ command: "echo hi", description: "d" }); + }); + + it("buildReadDecision returns a read-shaped decision with file_path set", () => { + const d = buildReadDecision("/tmp/x", "desc"); + expect(d.file_path).toBe("/tmp/x"); + expect(d.description).toBe("desc"); + }); + + it("rewritePaths collapses all memory-path forms to `/`", () => { + 
expect(rewritePaths("/home/emanuele/.deeplake/memory/sessions/a.json")).toBe("/sessions/a.json"); + expect(rewritePaths("~/.deeplake/memory/index.md")).toBe("/index.md"); + expect(rewritePaths("$HOME/.deeplake/memory/foo")).toBe("/foo"); + }); + + it("touchesMemory detects any of the supported memory-path forms", () => { + expect(touchesMemory("/home/emanuele/.deeplake/memory/x")).toBe(true); + expect(touchesMemory("~/.deeplake/memory/x")).toBe(true); + expect(touchesMemory("$HOME/.deeplake/memory/x")).toBe(true); + expect(touchesMemory("/var/log/foo")).toBe(false); + }); + + it("isSafe accepts shell pipelines built from the allowed builtins", () => { + expect(isSafe("cat /a | grep b | head -5")).toBe(true); + expect(isSafe("ls -la /x")).toBe(true); + }); + + it("isSafe rejects command substitution and unknown commands", () => { + expect(isSafe("rm -rf / ; curl evil")).toBe(false); + expect(isSafe("$(evil) foo")).toBe(false); + expect(isSafe("python -c pwn")).toBe(false); + }); +}); + +describe("getShellCommand: per-tool branches", () => { + it("Grep on a memory path builds `grep -r '' /` with -i/-n flags threaded through", () => { + const cmd = getShellCommand("Grep", { + path: "~/.deeplake/memory", + pattern: "Caroline", + "-i": true, + "-n": true, + }); + expect(cmd).toBe("grep -r -i -n 'Caroline' /"); + }); + + it("Grep on a non-memory path returns null", () => { + expect(getShellCommand("Grep", { path: "/etc", pattern: "x" })).toBeNull(); + }); + + it("Read on a memory file returns `cat `", () => { + expect(getShellCommand("Read", { file_path: "~/.deeplake/memory/sessions/conv_0_session_1.json" })) + .toBe("cat /sessions/conv_0_session_1.json"); + }); + + it("Read on a memory directory path returns `ls `", () => { + expect(getShellCommand("Read", { path: "~/.deeplake/memory/sessions" })).toBe("ls /sessions"); + }); + + it("Bash with a safe command is rewritten with memory paths collapsed", () => { + expect(getShellCommand("Bash", { command: "cat 
~/.deeplake/memory/index.md" })) + .toBe("cat /index.md"); + }); + + it("Bash with an unsafe command is blocked (returns null)", () => { + expect(getShellCommand("Bash", { command: "curl ~/.deeplake/memory/x" })).toBeNull(); + }); + + it("Bash with a command that doesn't touch memory returns null", () => { + expect(getShellCommand("Bash", { command: "ls /tmp" })).toBeNull(); + }); + + it("Glob on a memory path returns `ls /`", () => { + expect(getShellCommand("Glob", { path: "~/.deeplake/memory/" })).toBe("ls /"); + }); + + it("Glob on a non-memory path returns null", () => { + expect(getShellCommand("Glob", { path: "/etc" })).toBeNull(); + }); + + it("Unknown tool returns null", () => { + expect(getShellCommand("Write", { file_path: "~/.deeplake/memory/x" })).toBeNull(); + }); +}); + +describe("extractGrepParams", () => { + it("Grep tool: passes output_mode → filesOnly / countOnly; honours -i and -n", () => { + const p = extractGrepParams("Grep", { + path: "~/.deeplake/memory", + pattern: "X", + output_mode: "count", + "-i": true, + "-n": true, + }, "grep -r 'X' /"); + expect(p).not.toBeNull(); + expect(p!.countOnly).toBe(true); + expect(p!.filesOnly).toBe(false); + expect(p!.ignoreCase).toBe(true); + expect(p!.lineNumber).toBe(true); + }); + + it("Grep tool: empty path defaults to `/`", () => { + const p = extractGrepParams("Grep", { pattern: "X" }, "grep -r 'X' /"); + expect(p!.targetPath).toBe("/"); + }); + + it("Bash grep: delegates to parseBashGrep", () => { + const p = extractGrepParams("Bash", {}, "grep -l needle /sessions/*.json"); + expect(p).not.toBeNull(); + expect(p!.pattern).toBe("needle"); + }); + + it("Bash non-grep: returns null", () => { + expect(extractGrepParams("Bash", {}, "cat /x")).toBeNull(); + }); + + it("Unknown tool: returns null", () => { + expect(extractGrepParams("Write", {}, "x")).toBeNull(); + }); +}); + +describe("processPreToolUse: non-memory / no-op paths", () => { + it("returns null when the command doesn't touch memory and 
there's no shellCmd", async () => { + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "ls /tmp" }, tool_use_id: "t" }, + { config: BASE_CONFIG as any }, + ); + expect(d).toBeNull(); + }); + + it("returns [RETRY REQUIRED] guidance when an unsupported command mentions the memory path", async () => { + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "curl ~/.deeplake/memory/x" }, tool_use_id: "t" }, + { config: BASE_CONFIG as any, logFn: vi.fn() }, + ); + expect(d?.command).toContain("[RETRY REQUIRED]"); + expect(d?.command).toContain("bash builtins"); + }); + + it("falls back to the shell bundle when no config is loaded", async () => { + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "cat ~/.deeplake/memory/index.md" }, tool_use_id: "t" }, + { config: null as any, shellBundle: "/SHELL" }, + ); + expect(d?.command).toContain(`node "/SHELL" -c`); + expect(d?.description).toContain("[DeepLake shell]"); + }); +}); + +describe("processPreToolUse: Glob / ls branches", () => { + it("Glob on memory routes through listVirtualPathRows and renders a directory listing", async () => { + const listVirtualPathRowsFn = vi.fn(async () => [ + { path: "/sessions/conv_0_session_1.json", size_bytes: 100 }, + { path: "/sessions/conv_0_session_2.json", size_bytes: 200 }, + { path: "/summaries/alice/s1.md", size_bytes: 50 }, + ]) as any; + + const d = await processPreToolUse( + { session_id: "s", tool_name: "Glob", tool_input: { path: "~/.deeplake/memory/" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + listVirtualPathRowsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("sessions/"); + expect(d?.command).toContain("summaries/"); + expect(d?.description).toContain("[DeepLake direct] ls /"); + }); + + it("Bash `ls -la ` returns a 
long-format listing", async () => { + const listVirtualPathRowsFn = vi.fn(async () => [ + { path: "/summaries/alice/s1.md", size_bytes: 42 }, + ]) as any; + + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "ls -la ~/.deeplake/memory/summaries" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + listVirtualPathRowsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("drwxr-xr-x"); + expect(d?.command).toContain("alice/"); + }); + + it("ls on an empty directory reports `(empty directory)` — not a bogus path listing", async () => { + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "ls ~/.deeplake/memory/nope" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + listVirtualPathRowsFn: vi.fn(async () => []) as any, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("(empty directory)"); + }); +}); + +describe("processPreToolUse: Bash read-shape intercepts", () => { + const makeApiWith = (content: string | null) => ({ + api: makeApi(), + readVirtualPathContentFn: vi.fn(async () => content) as any, + }); + + it("`cat ` returns the raw content", async () => { + const { api, readVirtualPathContentFn } = makeApiWith("line1\nline2\nline3"); + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "cat ~/.deeplake/memory/sessions/a.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => api), + readVirtualPathContentFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("line1"); + expect(d?.description).toContain("[DeepLake direct] cat"); + }); + + it("`head -N ` limits to the first N lines", async () => { + const { api, readVirtualPathContentFn } = 
makeApiWith("l1\nl2\nl3\nl4"); + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "head -2 ~/.deeplake/memory/sessions/a.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => api), + readVirtualPathContentFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("l1\\nl2"); + expect(d?.command).not.toContain("l3"); + }); + + it("`tail -N ` limits to the last N lines", async () => { + const { api, readVirtualPathContentFn } = makeApiWith("l1\nl2\nl3\nl4"); + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "tail -2 ~/.deeplake/memory/sessions/a.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => api), + readVirtualPathContentFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("l3\\nl4"); + expect(d?.command).not.toContain("l1"); + }); + + it("`wc -l ` returns the line count with the virtual path", async () => { + const { api, readVirtualPathContentFn } = makeApiWith("a\nb\nc"); + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "wc -l ~/.deeplake/memory/sessions/a.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => api), + readVirtualPathContentFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("3 /sessions/a.json"); + expect(d?.description).toContain("wc -l"); + }); +}); + +describe("processPreToolUse: find / grep / fallback", () => { + it("Bash `find -name ''` lists matching paths", async () => { + const findVirtualPathsFn = vi.fn(async () => [ + "/sessions/conv_0_session_1.json", + "/sessions/conv_0_session_2.json", + ]) as any; + + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "find 
~/.deeplake/memory/sessions -name '*.json'" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + findVirtualPathsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("/sessions/conv_0_session_1.json"); + expect(d?.description).toContain("[DeepLake direct] find"); + }); + + it("Bash `find … | wc -l` returns the count", async () => { + const findVirtualPathsFn = vi.fn(async () => ["/a.json", "/b.json", "/c.json"]) as any; + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "find ~/.deeplake/memory/sessions -name '*.json' | wc -l" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + findVirtualPathsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain('"3"'); + }); + + it("Grep tool: falls through to handleGrepDirect and returns the matches", async () => { + const handleGrepDirectFn = vi.fn(async () => "/sessions/a.json:match line") as any; + const d = await processPreToolUse( + { + session_id: "s", + tool_name: "Grep", + tool_input: { path: "~/.deeplake/memory", pattern: "match", output_mode: "content" }, + tool_use_id: "t", + }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + handleGrepDirectFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("match line"); + }); + + it("throws in direct-read path → falls back to the shell bundle", async () => { + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "cat ~/.deeplake/memory/sessions/a.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + readVirtualPathContentFn: vi.fn(async () => { throw new Error("boom"); }) as any, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + shellBundle: "/SHELL", 
+ logFn: vi.fn(), + }, + ); + expect(d?.command).toContain('node "/SHELL" -c'); + }); +}); + +describe("processPreToolUse: index cache short-circuit", () => { + // `readVirtualPathContentsWithCache` is an inline callback the hook + // passes to `executeCompiledBashCommand` so the compiled-segments path + // can reuse the already-fetched /index.md content without hitting SQL + // twice. The happy path is only exercised when the compiler actually + // invokes the callback — these tests simulate exactly that. + + it("returns the cached /index.md immediately without calling readVirtualPathContents", async () => { + const readVirtualPathContentsFn = vi.fn(async (_api, _m, _s, paths: string[]) => + new Map(paths.map(p => [p, `FETCHED:${p}`])), + ) as any; + const readCachedIndexContentFn = vi.fn(() => "CACHED INDEX"); + const writeCachedIndexContentFn = vi.fn(); + + const executeCompiledBashCommandFn = vi.fn(async (_api, _memory, _sessions, _cmd, deps) => { + // Mimic what the real compiler does when it needs /index.md content. + const fetched = await deps.readVirtualPathContentsFn(_api, _memory, _sessions, ["/index.md", "/sessions/x.json"]); + return `idx=${fetched.get("/index.md")}\nx=${fetched.get("/sessions/x.json")}`; + }) as any; + + const d = await processPreToolUse( + { session_id: "s1", tool_name: "Bash", tool_input: { command: "cat ~/.deeplake/memory/index.md && cat ~/.deeplake/memory/sessions/x.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + readCachedIndexContentFn, + writeCachedIndexContentFn, + readVirtualPathContentsFn, + executeCompiledBashCommandFn, + }, + ); + + expect(d?.command).toContain("idx=CACHED INDEX"); + expect(d?.command).toContain("x=FETCHED:/sessions/x.json"); + // /index.md came from the per-session cache; only the /sessions/x.json + // path went to the API. 
+ expect(readCachedIndexContentFn).toHaveBeenCalledWith("s1"); + expect(readVirtualPathContentsFn).toHaveBeenCalledWith( + expect.anything(), + expect.anything(), + expect.anything(), + ["/sessions/x.json"], + ); + // Cache re-write always fires when /index.md is in the result set — + // idempotent for the hit path (same content in, same content out). + expect(writeCachedIndexContentFn).toHaveBeenCalledWith("s1", "CACHED INDEX"); + }); + + it("writes the freshly-fetched /index.md into the session cache when there's no hit", async () => { + const readVirtualPathContentsFn = vi.fn(async (_api, _m, _s, paths: string[]) => + new Map(paths.map(p => [p, p === "/index.md" ? "FRESH INDEX" : null])), + ) as any; + const readCachedIndexContentFn = vi.fn(() => null); + const writeCachedIndexContentFn = vi.fn(); + + const executeCompiledBashCommandFn = vi.fn(async (_api, _m, _s, _cmd, deps) => { + const fetched = await deps.readVirtualPathContentsFn(_api, _m, _s, ["/index.md"]); + return `out=${fetched.get("/index.md")}`; + }) as any; + + const d = await processPreToolUse( + { session_id: "s2", tool_name: "Bash", tool_input: { command: "cat ~/.deeplake/memory/index.md" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + readCachedIndexContentFn, + writeCachedIndexContentFn, + readVirtualPathContentsFn, + executeCompiledBashCommandFn, + }, + ); + + expect(d?.command).toContain("FRESH INDEX"); + expect(writeCachedIndexContentFn).toHaveBeenCalledWith("s2", "FRESH INDEX"); + }); + + it("Read on the memory root (no extension in basename) routes to the ls directory branch", async () => { + const listVirtualPathRowsFn = vi.fn(async () => [ + { path: "/sessions/conv_0_session_1.json", size_bytes: 100 }, + { path: "/summaries/alice/s1.md" /* no size_bytes → null branch */ }, + ]) as any; + + const d = await processPreToolUse( + { session_id: "s", tool_name: "Read", tool_input: { file_path: "~/.deeplake/memory/" }, tool_use_id: "t" }, + { + 
config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + listVirtualPathRowsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("sessions/"); + expect(d?.command).toContain("summaries/"); + }); + + it("Read on a directory with trailing slashes strips them before listing", async () => { + const listVirtualPathRowsFn = vi.fn(async () => [ + { path: "/sessions/conv_0_session_1.json", size_bytes: 42 }, + ]) as any; + + const d = await processPreToolUse( + { session_id: "s", tool_name: "Read", tool_input: { file_path: "~/.deeplake/memory/sessions///" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + listVirtualPathRowsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("conv_0_session_1.json"); + }); + + it("`head ` (no explicit -N) defaults to 10 lines", async () => { + const readVirtualPathContentFn = vi.fn(async () => + Array.from({ length: 20 }, (_, i) => `L${i}`).join("\n") + ) as any; + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "head ~/.deeplake/memory/sessions/a.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + readVirtualPathContentFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("L0"); + expect(d?.command).toContain("L9"); + expect(d?.command).not.toContain("L10"); + }); + + it("`tail ` (no explicit -N) defaults to the last 10 lines", async () => { + const readVirtualPathContentFn = vi.fn(async () => + Array.from({ length: 20 }, (_, i) => `L${i}`).join("\n") + ) as any; + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "tail ~/.deeplake/memory/sessions/a.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + readVirtualPathContentFn, 
+ executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("L19"); + expect(d?.command).toContain("L10"); + expect(d?.command).not.toContain("L9"); + }); + + it("ls -la listing includes both file entries (-rw-) and directory entries (drwx)", async () => { + // A flat file directly under the listed dir → file entry (isDir=false). + // A nested path under a subdir → directory entry (isDir=true). + const listVirtualPathRowsFn = vi.fn(async () => [ + { path: "/summaries/top-level.md", size_bytes: 42 }, + { path: "/summaries/alice/s1.md", size_bytes: 100 }, + { path: "/summaries/", size_bytes: 0 }, // empty suffix — skipped by `if (!name) continue` + ]) as any; + + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "ls -la ~/.deeplake/memory/summaries" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + listVirtualPathRowsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + // File entry → -rw-r--r-- prefix + expect(d?.command).toContain("-rw-r--r--"); + expect(d?.command).toContain("top-level.md"); + // Directory entry → drwxr-xr-x prefix + expect(d?.command).toContain("drwxr-xr-x"); + expect(d?.command).toContain("alice/"); + }); + + it("cat | head pipeline routes to the head fast-path", async () => { + const readVirtualPathContentFn = vi.fn(async () => + Array.from({ length: 30 }, (_, i) => `L${i}`).join("\n") + ) as any; + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "cat ~/.deeplake/memory/sessions/a.json | head -3" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + readVirtualPathContentFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("L0"); + expect(d?.command).toContain("L2"); + expect(d?.command).not.toContain("L3"); + }); + + it("Grep whose 
handleGrepDirect returns null falls through — no decision from grep path", async () => { + const handleGrepDirectFn = vi.fn(async () => null) as any; + const listVirtualPathRowsFn = vi.fn(async () => [ + { path: "/summaries/alice/s1.md", size_bytes: 100 }, + ]) as any; + // We send a Read on a directory so after grep-null fall-through the ls + // branch takes over with a real decision — proving the flow continues + // past the null grep result instead of erroring. + const d = await processPreToolUse( + { session_id: "s", tool_name: "Read", tool_input: { path: "~/.deeplake/memory/summaries" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + handleGrepDirectFn, + listVirtualPathRowsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).toContain("alice/"); + }); + + it("Bash `ls ` without -l uses short-format listing (no permissions prefix)", async () => { + const listVirtualPathRowsFn = vi.fn(async () => [ + { path: "/sessions/conv_0_session_1.json", size_bytes: 100 }, + ]) as any; + const d = await processPreToolUse( + { session_id: "s", tool_name: "Bash", tool_input: { command: "ls ~/.deeplake/memory/sessions" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + listVirtualPathRowsFn, + executeCompiledBashCommandFn: vi.fn(async () => null) as any, + }, + ); + expect(d?.command).not.toContain("drwxr-xr-x"); + expect(d?.command).toContain("conv_0_session_1.json"); + }); + + it("handles the no-paths edge case (empty cachePaths passed by the compiler)", async () => { + const readVirtualPathContentsFn = vi.fn(async () => new Map()) as any; + const readCachedIndexContentFn = vi.fn(() => null); + + const executeCompiledBashCommandFn = vi.fn(async (_api, _m, _s, _cmd, deps) => { + const result = await deps.readVirtualPathContentsFn(_api, _m, _s, []); + return `size=${result.size}`; + }) as any; + + const d = await processPreToolUse( + { 
session_id: "s3", tool_name: "Bash", tool_input: { command: "cat ~/.deeplake/memory/sessions/a.json" }, tool_use_id: "t" }, + { + config: BASE_CONFIG as any, + createApi: vi.fn(() => makeApi()), + readCachedIndexContentFn, + writeCachedIndexContentFn: vi.fn(), + readVirtualPathContentsFn, + executeCompiledBashCommandFn, + }, + ); + expect(d?.command).toContain("size=0"); + // Didn't touch SQL because paths were empty. + expect(readVirtualPathContentsFn).not.toHaveBeenCalled(); + }); +}); diff --git a/vitest.config.ts b/vitest.config.ts index 2fb2c0b..dccf756 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -82,6 +82,18 @@ export default defineConfig({ functions: 90, lines: 90, }, + "src/hooks/pre-tool-use.ts": { + statements: 90, + branches: 90, + functions: 90, + lines: 90, + }, + "src/hooks/memory-path-utils.ts": { + statements: 90, + branches: 90, + functions: 90, + lines: 90, + }, }, }, }, From f21e693ca947b91b3741adaa6385a3194fb37742 Mon Sep 17 00:00:00 2001 From: Emanuele Fenocchi Date: Tue, 21 Apr 2026 00:21:50 +0000 Subject: [PATCH 39/39] test(pre-tool-use): use homedir() instead of hardcoded /home/emanuele paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CI (HOME=/home/runner) reported two failures on the just-added branch coverage suite: AssertionError: expected '/home/emanuele/.deeplake/memory/...' to be '/sessions/a.json' The `rewritePaths` and `touchesMemory` assertions hardcoded my local home path. The real MEMORY_PATH in production is join(homedir(), ".deeplake", "memory"), so hardcoded absolute paths in tests don't survive anywhere except my workstation — not CI, not another developer's machine. Import `homedir` + `join` from node:os / node:path and build MEM_ABS once at the top of the file. The two affected cases now use template strings so the values match whatever home the test runner is using. 
The other tests in the suite already use ~-prefixed literals, matched by the TILDE_PATH branch independently of homedir. Verified: `env -i HOME=/home/runner PATH=$PATH npx vitest run` — 46 / 46 pass. --- claude-code/tests/pre-tool-use-branches.test.ts | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/claude-code/tests/pre-tool-use-branches.test.ts b/claude-code/tests/pre-tool-use-branches.test.ts index 4ad05cb..cb3de12 100644 --- a/claude-code/tests/pre-tool-use-branches.test.ts +++ b/claude-code/tests/pre-tool-use-branches.test.ts @@ -11,6 +11,8 @@ */ import { describe, expect, it, vi } from "vitest"; +import { homedir } from "node:os"; +import { join } from "node:path"; import { buildAllowDecision, buildReadDecision, @@ -22,6 +24,11 @@ import { touchesMemory, } from "../../src/hooks/pre-tool-use.js"; +// MEMORY_PATH is `${homedir()}/.deeplake/memory` — differs between CI +// (`/home/runner/...`) and dev (`/home/<user>/...`), so any test that +// asserts on the literal form has to build it from homedir() too. +const MEM_ABS = join(homedir(), ".deeplake", "memory"); + const BASE_CONFIG = { token: "t", apiUrl: "http://example", @@ -48,13 +55,13 @@ describe("pre-tool-use: pure helpers", () => { }); it("rewritePaths collapses all memory-path forms to `/`", () => { - expect(rewritePaths("/home/emanuele/.deeplake/memory/sessions/a.json")).toBe("/sessions/a.json"); + expect(rewritePaths(`${MEM_ABS}/sessions/a.json`)).toBe("/sessions/a.json"); expect(rewritePaths("~/.deeplake/memory/index.md")).toBe("/index.md"); expect(rewritePaths("$HOME/.deeplake/memory/foo")).toBe("/foo"); }); it("touchesMemory detects any of the supported memory-path forms", () => { - expect(touchesMemory("/home/emanuele/.deeplake/memory/x")).toBe(true); + expect(touchesMemory(`${MEM_ABS}/x`)).toBe(true); expect(touchesMemory("~/.deeplake/memory/x")).toBe(true); expect(touchesMemory("$HOME/.deeplake/memory/x")).toBe(true); expect(touchesMemory("/var/log/foo")).toBe(false);