diff --git a/docs/PLUGINS.md b/docs/PLUGINS.md index 82610c1..00c12ed 100644 --- a/docs/PLUGINS.md +++ b/docs/PLUGINS.md @@ -548,7 +548,6 @@ export const SCHEMA = { description: "Maximum file size in KB", default: 1024, minimum: 1, - maximum: 10240, }, } satisfies ConfigSchema; diff --git a/plugins/fetch/index.ts b/plugins/fetch/index.ts index 0096fad..91afd83 100644 --- a/plugins/fetch/index.ts +++ b/plugins/fetch/index.ts @@ -95,76 +95,66 @@ export const SCHEMA = { }, connectTimeoutMs: { type: "number" as const, - description: "TCP+TLS connect timeout in milliseconds (max 10000)", + description: "TCP+TLS connect timeout in milliseconds", default: 5000, minimum: 1000, - maximum: 10000, }, readTimeoutMs: { type: "number" as const, - description: "Read timeout in milliseconds (max 30000)", + description: "Read timeout in milliseconds", default: 10000, minimum: 1000, - maximum: 30000, }, maxResponseSizeKb: { type: "number" as const, description: - "Maximum total response body size in KB (max 8192). Responses larger than this are rejected.", + "Maximum total response body size in KB. Responses larger than this are rejected.", default: 1024, minimum: 1, - maximum: 8192, }, readSizeKb: { type: "number" as const, description: - "Maximum body size returned per read() call in KB (max 256). Must be smaller than the sandbox output buffer.", + "Maximum body size returned per read() call in KB. Must be smaller than the sandbox output buffer.", default: 48, minimum: 8, - maximum: 256, }, responseCacheTtlSeconds: { type: "number" as const, description: - "How long response bodies stay cached on the host before expiring (seconds, max 600)", + "How long response bodies stay cached on the host before expiring (seconds)", default: 300, minimum: 30, - maximum: 600, }, maxRequestBodySizeKb: { type: "number" as const, - description: "Maximum POST request body size in KB (max 64)", + description: "Maximum POST request body size in KB", default: 4, minimum: 1, - maximum: 64, }, maxRequestsPerMinute: { type: "number" as const, description: "Maximum fetch calls per minute (sliding window)", default: 30, minimum: 1, - maximum: 60, }, maxRequestsPerHour: { type: "number" as const, description: "Maximum fetch calls per hour (session-scoped)", default: 100, minimum: 1, - maximum: 500, }, maxDomainsPerSession: { type: "number" as const, description: "Maximum unique domains per session", default: 5, minimum: 1, - maximum: 20, }, maxDataReceivedKb: { type: "number" as const, description: "Maximum total response data per session in KB", default: 2048, minimum: 1, - maximum: 16384, }, returnXRequestId: { type: "boolean" as const, @@ -175,10 +165,9 @@ export const SCHEMA = { conditionalCacheMaxEntries: { type: "number" as const, description: - "Maximum number of URLs cached for conditional requests (ETag/Last-Modified). 0 effectively disables caching (min 1).", + "Maximum number of URLs cached for conditional requests (ETag/Last-Modified).", default: 20, minimum: 1, - maximum: 100, }, conditionalCacheTtlSeconds: { type: "number" as const, @@ -186,7 +175,6 @@ export const SCHEMA = { "How long conditional-cache entries remain valid (seconds). After this, the next GET sends a normal request without conditional headers.", default: 600, minimum: 60, - maximum: 3600, }, autoRetryOn429: { type: "boolean" as const, @@ -200,7 +188,6 @@ export const SCHEMA = { "Maximum seconds to wait for a single 429 retry. 
If server asks for longer, returns error instead of waiting.", default: 30, minimum: 1, - maximum: 120, }, autoRetryMaxAttempts: { type: "number" as const, @@ -208,7 +195,6 @@ export const SCHEMA = { "Maximum number of retry attempts on 429 before giving up and returning the error.", default: 3, minimum: 1, - maximum: 10, }, maxParallelFetches: { type: "number" as const, @@ -216,7 +202,6 @@ export const SCHEMA = { "Maximum concurrent requests for batch operations like fetchBinaryBatch. Higher values speed up bulk downloads but may trigger server rate limits. Default 1 (serial).", default: 1, minimum: 1, - maximum: 10, }, diskCacheMaxMb: { type: "number" as const, @@ -224,7 +209,27 @@ export const SCHEMA = { "Maximum disk cache size in MB for anonymous HTTP responses. Cached in $HOME/.hyperagent/fetch-cache with LFU eviction. Set to 0 to disable.", default: 100, minimum: 0, - maximum: 1000, + }, + maxRedirects: { + type: "number" as const, + description: + "Maximum number of HTTP redirects to follow. Each hop is re-validated against the domain allowlist and SSRF checks.", + default: 5, + minimum: 0, + }, + maxJsonResponseBytes: { + type: "number" as const, + description: + "Maximum response size in bytes for fetchJSON convenience method. Larger responses should use get() + read() streaming.", + default: 1048576, + minimum: 1024, + }, + maxTextResponseBytes: { + type: "number" as const, + description: + "Maximum response size in bytes for fetchText convenience method. Larger responses should use get() + read() streaming.", + default: 2097152, + minimum: 1024, }, } satisfies ConfigSchema; @@ -462,6 +467,7 @@ interface SecureFetchOptions { returnXRequestId: boolean; exactDomains: Set<string>; wildcardDomains: string[]; + maxRedirects: number; signal?: AbortSignal; } @@ -492,10 +498,6 @@ interface SecureFetchSingleOptions extends Omit< * A blocked domain and a successful fetch both take ≥ this long. */ const MIN_RESPONSE_DELAY_MS = 200; -/** Maximum number of HTTP redirects to follow. Each hop is - * re-validated against the domain allowlist and SSRF checks. */ -const MAX_REDIRECTS = 5; - /** HTTP status codes that trigger redirect following. */ const REDIRECT_STATUS_CODES = new Set([301, 302, 303, 307, 308]); @@ -2453,7 +2455,7 @@ function validateRedirectTarget( /** * Perform a secure HTTPS request with redirect following. * - * Wraps secureFetchSingle in a redirect loop (up to MAX_REDIRECTS hops). + * Wraps secureFetchSingle in a redirect loop (up to opts.maxRedirects hops). * Each redirect target is fully re-validated: * - HTTPS only (no protocol downgrade) * - Domain must be in the operator's allowlist @@ -2487,7 +2489,7 @@ async function secureFetch( let currentBody = opts.body; const visited = new Set<string>(); - for (let hop = 0; hop <= MAX_REDIRECTS; hop++) { + for (let hop = 0; hop <= opts.maxRedirects; hop++) { const urlKey = currentUrl.href; // Redirect loop detection @@ -2544,7 +2546,9 @@ async function secureFetch( } // Exhausted redirect budget - return { error: `fetch blocked: too many redirects (max ${MAX_REDIRECTS})` }; + return { + error: `fetch blocked: too many redirects (max ${opts.maxRedirects})`, + }; } // ── Utility ────────────────────────────────────────────────────────── @@ -2837,27 +2841,35 @@ export function createHostFunctions(config?: FetchConfig): FetchHostFunctions { // Enforce manifest-declared minimums as the floor parameter (4th arg). // Previously floor defaulted to 1, so e.g. 
connectTimeoutMs=1 was silently // accepted despite the manifest declaring minimum: 1000 (audit finding F-08). + // No artificial ceilings — the user decides what's appropriate for their + // hardware and use case. Number.MAX_SAFE_INTEGER means "no ceiling". + const NO_CEIL = Number.MAX_SAFE_INTEGER; const connectTimeoutMs = safeNumericConfig( cfg.connectTimeoutMs, 5000, - 10_000, + NO_CEIL, 1000, ); const readTimeoutMs = safeNumericConfig( cfg.readTimeoutMs, 10_000, - 30_000, + NO_CEIL, 1000, ); const maxResponseBytes = - safeNumericConfig(cfg.maxResponseSizeKb, 256, 8192) * 1024; - const readSizeBytes = safeNumericConfig(cfg.readSizeKb, 48, 256, 8) * 1024; + safeNumericConfig(cfg.maxResponseSizeKb, 1024, NO_CEIL) * 1024; + const readSizeBytes = + safeNumericConfig(cfg.readSizeKb, 48, NO_CEIL, 8) * 1024; const responseCacheTtlMs = - safeNumericConfig(cfg.responseCacheTtlSeconds, 300, 600, 30) * 1000; + safeNumericConfig(cfg.responseCacheTtlSeconds, 300, NO_CEIL, 30) * 1000; const maxRequestBodyBytes = - safeNumericConfig(cfg.maxRequestBodySizeKb, 4, 64) * 1024; - const maxPerMinuteRaw = safeNumericConfig(cfg.maxRequestsPerMinute, 30, 60); - const maxPerHour = safeNumericConfig(cfg.maxRequestsPerHour, 100, 500); + safeNumericConfig(cfg.maxRequestBodySizeKb, 4, NO_CEIL) * 1024; + const maxPerMinuteRaw = safeNumericConfig( + cfg.maxRequestsPerMinute, + 30, + NO_CEIL, + ); + const maxPerHour = safeNumericConfig(cfg.maxRequestsPerHour, 100, NO_CEIL); // Clamp per-minute to never exceed per-hour — an operator setting // 60/minute with 1/hour makes no sense and defeats the hourly cap. const maxPerMinute = Math.min(maxPerMinuteRaw, maxPerHour); @@ -2866,39 +2878,58 @@ export function createHostFunctions(config?: FetchConfig): FetchHostFunctions { `[fetch] maxRequestsPerMinute (${maxPerMinuteRaw}) exceeds maxRequestsPerHour (${maxPerHour}) — clamped to ${maxPerMinute}`, ); } - const maxDomains = safeNumericConfig(cfg.maxDomainsPerSession, 5, 20); + const maxDomains = safeNumericConfig(cfg.maxDomainsPerSession, 5, NO_CEIL); const maxDataReceivedBytes = - safeNumericConfig(cfg.maxDataReceivedKb, 512, 16384) * 1024; + safeNumericConfig(cfg.maxDataReceivedKb, 2048, NO_CEIL) * 1024; const returnXRequestId = !!cfg.returnXRequestId; const conditionalCacheMax = safeNumericConfig( cfg.conditionalCacheMaxEntries, 20, - 100, + NO_CEIL, ); const conditionalCacheTtlMs = - safeNumericConfig(cfg.conditionalCacheTtlSeconds, 600, 3600, 60) * 1000; + safeNumericConfig(cfg.conditionalCacheTtlSeconds, 600, NO_CEIL, 60) * 1000; // Auto-retry on 429 configuration const autoRetryOn429 = !!cfg.autoRetryOn429; const autoRetryMaxWaitSeconds = safeNumericConfig( cfg.autoRetryMaxWaitSeconds, 30, - 120, + NO_CEIL, ); const autoRetryMaxAttempts = safeNumericConfig( cfg.autoRetryMaxAttempts, 3, - 10, + NO_CEIL, ); // Parallel fetch configuration — controls how many requests can be in flight // simultaneously. Default 1 for backwards compatibility (serial). // Higher values speed up batch downloads but may trigger server rate limits. - const maxParallelFetches = safeNumericConfig(cfg.maxParallelFetches, 1, 10); + const maxParallelFetches = safeNumericConfig( + cfg.maxParallelFetches, + 1, + NO_CEIL, + ); + + // Redirect, JSON, and text response size limits — user-configurable. 
+ const maxRedirects = safeNumericConfig(cfg.maxRedirects, 5, NO_CEIL, 0); + const maxJsonResponseBytes = safeNumericConfig( + cfg.maxJsonResponseBytes, + 1024 * 1024, + NO_CEIL, + 1024, + ); + const maxTextResponseBytes = safeNumericConfig( + cfg.maxTextResponseBytes, + 2 * 1024 * 1024, + NO_CEIL, + 1024, + ); // Disk cache configuration — persistent LFU cache in $HOME/.hyperagent/fetch-cache const diskCacheMaxBytes = - safeNumericConfig(cfg.diskCacheMaxMb, 100, 1000, 0) * 1024 * 1024; + safeNumericConfig(cfg.diskCacheMaxMb, 100, NO_CEIL, 0) * 1024 * 1024; // Build allowed header names set (lowercased) const rawAllowedHeaders = Array.isArray(cfg.allowedRequestHeaders) @@ -3177,6 +3208,7 @@ export function createHostFunctions(config?: FetchConfig): FetchHostFunctions { returnXRequestId, exactDomains, wildcardDomains, + maxRedirects, signal, }), safetyTimeout, @@ -3478,12 +3510,13 @@ export function createHostFunctions(config?: FetchConfig): FetchHostFunctions { const body = chunks.join(""); // Guard against oversized responses blowing through heap limits. - // 1MB is reasonable for JSON APIs; larger responses should stream. - const MAX_JSON_BYTES = 1024 * 1024; - if (body.length > MAX_JSON_BYTES) { + // Use Buffer.byteLength for accurate UTF-8 byte count (body.length + // counts UTF-16 code units which undercounts for non-ASCII content). + const jsonBodyBytes = Buffer.byteLength(body, "utf8"); + if (jsonBodyBytes > maxJsonResponseBytes) { throw new Error( `fetchJSON: response too large ` + - `(${body.length} bytes, max ${MAX_JSON_BYTES}). ` + + `(${jsonBodyBytes} bytes, max ${maxJsonResponseBytes}). ` + `Use get() + read() loop to stream large responses instead.`, ); } @@ -3543,12 +3576,12 @@ export function createHostFunctions(config?: FetchConfig): FetchHostFunctions { const body = chunks.join(""); // Guard against oversized responses blowing through heap limits. - // 2MB is reasonable for text content like HTML pages. - const MAX_TEXT_BYTES = 2 * 1024 * 1024; - if (body.length > MAX_TEXT_BYTES) { + // Use Buffer.byteLength for accurate UTF-8 byte count. + const textBodyBytes = Buffer.byteLength(body, "utf8"); + if (textBodyBytes > maxTextResponseBytes) { throw new Error( `fetchText: response too large ` + - `(${body.length} bytes, max ${MAX_TEXT_BYTES}). ` + + `(${textBodyBytes} bytes, max ${maxTextResponseBytes}). ` + `Use get() + read() loop to stream large responses instead.`, ); } diff --git a/plugins/fs-read/index.ts b/plugins/fs-read/index.ts index a935532..00b23ca 100644 --- a/plugins/fs-read/index.ts +++ b/plugins/fs-read/index.ts @@ -61,10 +61,22 @@ export const SCHEMA = { maxFileSizeKb: { type: "number" as const, description: - "Maximum total file size allowed for reads in kilobytes. Files larger than this are rejected outright. Clamped to 10240 (10 MB).", + "Maximum total file size allowed for reads in kilobytes. Files larger than this are rejected outright.", default: 10240, minimum: 0, - maximum: 10240, + }, + maxReadChunkKb: { + type: "number" as const, + description: + "Maximum data returned by a single readFile/readFileBinary call in kilobytes. 
Tied to the Hyperlight input buffer size — raising this beyond the configured buffer will cause VM faults.", + default: 1024, + minimum: 64, + }, + maxListResults: { + type: "number" as const, + description: "Maximum number of entries returned by a single listDir call.", + default: 1000, + minimum: 10, }, } satisfies ConfigSchema; @@ -116,23 +128,6 @@ export interface StatResult { // ── Constants ─────────────────────────────────────────────────────── -/** Maximum allowed config value for size limits (10 MB). */ -const MAX_SIZE_LIMIT_KB = 10240; - -/** - * Maximum data returned by a single readFile call (1 MB). - * - * This is a hard limit — NOT configurable. It exists because readFile - * return values transit the Hyperlight input buffer (host→guest shared - * memory). The buffer must be at least MAX_READ_CHUNK_KB + 16 KB of - * protocol framing. Files larger than this should be read via - * readFileChunk with explicit offset/length. - * - * DO NOT raise this without also raising DEFAULT_INPUT_BUFFER_KB in - * sandbox-tool.js — exceeding the buffer causes a hard VM fault. - */ -const MAX_READ_CHUNK_KB = 1024; - /** * Allowed encoding values for read operations. * "utf8" (default) returns text; "base64" returns raw bytes as a @@ -141,9 +136,6 @@ const MAX_READ_CHUNK_KB = 1024; */ const ALLOWED_ENCODINGS = new Set(["utf8", "base64"]); -/** Maximum number of entries returned by listDir. */ -const MAX_LIST_RESULTS = 1000; - /** Length of random suffix for temp directory names. */ const TEMP_DIR_RANDOM_BYTES = 8; @@ -227,13 +219,22 @@ export function createHostFunctions( } // safeNumericConfig rejects NaN/Infinity/negative and clamps to ceiling. + // No artificial ceilings — pass Number.MAX_SAFE_INTEGER so the user + // decides based on their hardware and sandbox buffer configuration. + const NO_CEIL = Number.MAX_SAFE_INTEGER; const maxFileBytes = - safeNumericConfig(cfg.maxFileSizeKb, MAX_SIZE_LIMIT_KB) * 1024; + safeNumericConfig(cfg.maxFileSizeKb, 10240, NO_CEIL) * 1024; + + // Per-call chunk limit — configurable via maxReadChunkKb (default 1 MB). + // Note: raising this beyond the Hyperlight input buffer size will cause + // VM faults. The user is responsible for matching buffer + chunk config. + const maxReadChunkBytes = + safeNumericConfig(cfg.maxReadChunkKb, 1024, NO_CEIL) * 1024; - // Per-call chunk limit — hard cap tied to sandbox buffer size. - // readFile rejects files above this; readFileChunk enforces it - // on the length parameter. Not configurable by design. - const maxReadChunkBytes = MAX_READ_CHUNK_KB * 1024; + // Maximum directory listing results — configurable via maxListResults. + const maxListEntries = Math.floor( + safeNumericConfig(cfg.maxListResults, 1000, NO_CEIL), + ); // O_NOFOLLOW atomically rejects symlinks at open() on POSIX. // On Windows it doesn't exist — we rely on the lstatSync pre-check @@ -276,7 +277,7 @@ export function createHostFunctions( } if (fileStat.size > maxReadChunkBytes) { return { - error: `File too large for single read: ${fileStat.size} bytes exceeds per-call limit of ${MAX_READ_CHUNK_KB}KB. Use readFileChunk(path, offsetBytes, lengthBytes) to read in chunks.`, + error: `File too large for single read: ${fileStat.size} bytes exceeds per-call limit of ${maxReadChunkBytes / 1024}KB. 
Use readFileChunk(path, offsetBytes, lengthBytes) to read in chunks.`, }; } @@ -417,7 +418,7 @@ export function createHostFunctions( if (entry.name.startsWith(".")) continue; if (entry.isSymbolicLink()) continue; if (!entry.isFile() && !entry.isDirectory()) continue; - if (results.length >= MAX_LIST_RESULTS) break; + if (results.length >= maxListEntries) break; results.push({ name: entry.name, @@ -489,7 +490,7 @@ export function createHostFunctions( if (fileStat.size > maxReadChunkBytes) { throw new Error( `File too large for single read: ${fileStat.size} bytes exceeds ` + - `per-call limit of ${MAX_READ_CHUNK_KB}KB. ` + + `per-call limit of ${maxReadChunkBytes / 1024}KB. ` + `Use readFileChunkBinary(path, offsetBytes, lengthBytes) to read in chunks.`, ); } diff --git a/plugins/fs-read/plugin.json b/plugins/fs-read/plugin.json index a5079f3..af9cdcc 100644 --- a/plugins/fs-read/plugin.json +++ b/plugins/fs-read/plugin.json @@ -9,7 +9,7 @@ "All paths are relative to baseDir — absolute paths become relative", "Symlinks are REJECTED (security measure)", "Dotfiles are BLOCKED (no .env, .git, etc.)", - "Per-call limit: 1MB — use readFileChunk for larger files", + "Per-call default: 1MB (configurable via maxReadChunkKb) — use readFileChunk for larger files", "Text functions return {error}, binary functions THROW" ], "commonPatterns": [ diff --git a/plugins/fs-write/index.ts b/plugins/fs-write/index.ts index 466833a..b90b576 100644 --- a/plugins/fs-write/index.ts +++ b/plugins/fs-write/index.ts @@ -63,18 +63,23 @@ export const SCHEMA = { maxWriteSizeKb: { type: "number" as const, description: - "Maximum per-file cumulative size for writes/appends in kilobytes. Cumulative for appends (existing + new). Set to 0 to block non-empty writes. Clamped to 51200 (50 MB).", + "Maximum per-file cumulative size for writes/appends in kilobytes. Cumulative for appends (existing + new). Set to 0 to block non-empty writes.", default: 20480, minimum: 0, - maximum: 51200, }, maxEntries: { type: "number" as const, description: - "Maximum number of files and directories that can be created (combined total). Prevents inode/disk exhaustion from runaway writes. Set to 0 to block all creation. Clamped to 10000.", + "Maximum number of files and directories that can be created (combined total). Prevents inode/disk exhaustion from runaway writes. Set to 0 to block all creation.", default: 1000, minimum: 0, - maximum: 10000, + }, + maxWriteChunkKb: { + type: "number" as const, + description: + "Maximum data accepted by a single writeFile/appendFile call in kilobytes. Tied to the Hyperlight output buffer size — raising this beyond the configured buffer will cause VM faults.", + default: 2048, + minimum: 64, }, } satisfies ConfigSchema; @@ -105,23 +110,11 @@ export interface MkdirResult { // ── Constants ─────────────────────────────────────────────────────── -/** Maximum allowed config value for size limits (50 MB). */ -const MAX_SIZE_LIMIT_KB = 51200; - -/** - * Maximum data accepted by a single writeFile/appendFile call (2 MB). - * Increased from 1MB to support larger single writes when output buffer is configured. - */ -const MAX_WRITE_CHUNK_KB = 2048; - /** * Allowed encoding values for write operations. */ const ALLOWED_ENCODINGS = new Set(["utf8", "base64"]); -/** Maximum allowed config value for entry creation limit. */ -const MAX_ENTRIES_LIMIT = 10000; - /** File creation mode — owner read/write only. 
*/ const FILE_MODE = 0o600; @@ -194,9 +187,17 @@ export function createHostFunctions( ); } + // No artificial ceilings — pass Number.MAX_SAFE_INTEGER so the user + // decides based on their hardware and sandbox buffer configuration. + const NO_CEIL = Number.MAX_SAFE_INTEGER; const maxWriteBytes = - safeNumericConfig(cfg.maxWriteSizeKb, 20480, MAX_SIZE_LIMIT_KB) * 1024; - const maxWriteChunkBytes = MAX_WRITE_CHUNK_KB * 1024; + safeNumericConfig(cfg.maxWriteSizeKb, 20480, NO_CEIL) * 1024; + + // Per-call chunk limit — configurable via maxWriteChunkKb (default 2 MB). + // Note: raising this beyond the Hyperlight output buffer size will cause + // VM faults. The user is responsible for matching buffer + chunk config. + const maxWriteChunkBytes = + safeNumericConfig(cfg.maxWriteChunkKb, 2048, NO_CEIL) * 1024; // O_NOFOLLOW atomically rejects symlinks at open() on POSIX. // On Windows it doesn't exist — we rely on the lstatSync pre-check @@ -206,7 +207,7 @@ export function createHostFunctions( const O_NOFOLLOW = FS_CONSTANTS.O_NOFOLLOW ?? 0; const maxEntries = Math.floor( - safeNumericConfig(cfg.maxEntries, 500, MAX_ENTRIES_LIMIT), + safeNumericConfig(cfg.maxEntries, 1000, NO_CEIL), ); let entriesCreated = 0; @@ -242,7 +243,7 @@ export function createHostFunctions( if (contentBytes > maxWriteChunkBytes) { return { - error: `Content too large for single write: ${contentBytes} bytes exceeds per-call limit of ${MAX_WRITE_CHUNK_KB}KB. Split into multiple appendFile calls.`, + error: `Content too large for single write: ${contentBytes} bytes exceeds per-call limit of ${maxWriteChunkBytes / 1024}KB. Split into multiple appendFile calls.`, }; } if (contentBytes > maxWriteBytes) { @@ -332,7 +333,7 @@ export function createHostFunctions( if (contentBytes > maxWriteChunkBytes) { return { - error: `Append content too large for single call: ${contentBytes} bytes exceeds per-call limit of ${MAX_WRITE_CHUNK_KB}KB. Split into smaller appendFile calls.`, + error: `Append content too large for single call: ${contentBytes} bytes exceeds per-call limit of ${maxWriteChunkBytes / 1024}KB. Split into smaller appendFile calls.`, }; } if (contentBytes > maxWriteBytes) { @@ -434,7 +435,7 @@ export function createHostFunctions( : " Split into multiple appendFileBinary calls."; throw new Error( `Content too large for single write: ${contentBytes} bytes exceeds ` + - `per-call limit of ${MAX_WRITE_CHUNK_KB}KB.${hint}`, + `per-call limit of ${maxWriteChunkBytes / 1024}KB.${hint}`, ); } if (contentBytes > maxWriteBytes) { @@ -518,7 +519,7 @@ export function createHostFunctions( if (contentBytes > maxWriteChunkBytes) { throw new Error( `Append content too large for single call: ${contentBytes} bytes exceeds ` + - `per-call limit of ${MAX_WRITE_CHUNK_KB}KB. ` + + `per-call limit of ${maxWriteChunkBytes / 1024}KB. 
` + `Split into smaller appendFileBinary calls.`, ); } diff --git a/plugins/fs-write/plugin.json b/plugins/fs-write/plugin.json index 19e65f4..0c3ba3c 100644 --- a/plugins/fs-write/plugin.json +++ b/plugins/fs-write/plugin.json @@ -10,7 +10,7 @@ "All paths are relative to baseDir — absolute paths become relative", "Symlinks are REJECTED when writing", "Dotfiles are BLOCKED (no .env, .git, etc.)", - "Per-call limit: 2MB — use multiple appends for larger files", + "Per-call default: 2MB (configurable via maxWriteChunkKb) — use multiple appends for larger files", "Text functions return {error}, binary functions THROW", "writeFileBinary for Uint8Array (PPTX, images), writeFile for text" ], diff --git a/tests/fetch.test.ts b/tests/fetch.test.ts index a6aadbf..807a5e2 100644 --- a/tests/fetch.test.ts +++ b/tests/fetch.test.ts @@ -2757,4 +2757,58 @@ describe("maxParallelFetches", () => { const results = await fns.fetch.fetchBinaryBatch([]); expect(results).toEqual([]); }); + + // ── configurable limits ────────────────────────────────────── + + describe("configurable limits", () => { + it("should accept maxRedirects config without error", () => { + const fns = createHostFunctions({ + allowedDomains: ["example.com"], + maxRedirects: 0, + }); + expect(fns.fetch.get).toBeDefined(); + }); + + it("should accept large maxRedirects config", () => { + const fns = createHostFunctions({ + allowedDomains: ["example.com"], + maxRedirects: 100, + }); + expect(fns.fetch.get).toBeDefined(); + }); + + it("should accept maxJsonResponseBytes config", () => { + const fns = createHostFunctions({ + allowedDomains: ["example.com"], + maxJsonResponseBytes: 50 * 1024 * 1024, // 50 MB + }); + expect(fns.fetch.fetchJSON).toBeDefined(); + }); + + it("should accept maxTextResponseBytes config", () => { + const fns = createHostFunctions({ + allowedDomains: ["example.com"], + maxTextResponseBytes: 100 * 1024 * 1024, // 100 MB + }); + expect(fns.fetch.fetchText).toBeDefined(); + }); + + it("should accept very large maxDataReceivedKb for 2GB+ sessions", () => { + const fns = createHostFunctions({ + allowedDomains: ["example.com"], + maxDataReceivedKb: 2 * 1024 * 1024, // 2 GB + }); + expect(fns.fetch.get).toBeDefined(); + }); + + it("should accept uncapped rate limit configs", () => { + const fns = createHostFunctions({ + allowedDomains: ["example.com"], + maxRequestsPerMinute: 1000, + maxRequestsPerHour: 50000, + maxDomainsPerSession: 500, + }); + expect(fns.fetch.get).toBeDefined(); + }); + }); }); diff --git a/tests/fs-read.test.ts b/tests/fs-read.test.ts index a52ed9d..8350b36 100644 --- a/tests/fs-read.test.ts +++ b/tests/fs-read.test.ts @@ -781,4 +781,58 @@ describe("createHostFunctions", () => { expect(result.error).toContain("maximum length"); }); }); + + // ── configurable limits ────────────────────────────────────── + + describe("configurable limits", () => { + it("should enforce custom maxReadChunkKb", () => { + // Set a tiny per-call chunk limit (1 KB) + const smallChunkFns = createHostFunctions({ + baseDir, + maxReadChunkKb: 1, + })["fs-read"]; + // Write a file larger than 1 KB + writeFileSync(join(baseDir, "big-for-chunk.txt"), "x".repeat(2048)); + const result = smallChunkFns.readFile("big-for-chunk.txt"); + expect(result.error).toContain("per-call limit"); + }); + + it("should allow reads when file is within custom maxReadChunkKb", () => { + const smallChunkFns = createHostFunctions({ + baseDir, + maxReadChunkKb: 4, + })["fs-read"]; + writeFileSync(join(baseDir, "small-for-chunk.txt"), "hello"); + const result 
= smallChunkFns.readFile("small-for-chunk.txt"); + expect(result.content).toBe("hello"); + }); + + it("should enforce custom maxListResults", () => { + // Create 10 files but limit to 3 results + const limitedFns = createHostFunctions({ + baseDir, + maxListResults: 3, + })["fs-read"]; + for (let i = 0; i < 10; i++) { + writeFileSync(join(baseDir, `item-${i}.txt`), `${i}`); + } + const result = limitedFns.listDir("."); + expect(Array.isArray(result)).toBe(true); + expect((result as Array<unknown>).length).toBeLessThanOrEqual(3); + }); + + it("should allow maxFileSizeKb above the old 10 MB ceiling", () => { + // Verify the ceiling bug is fixed — 20480 KB should not be + // clamped to 10240 by safeNumericConfig's default ceiling. + const largeFns = createHostFunctions({ + baseDir, + maxFileSizeKb: 20480, + })["fs-read"]; + // Write a 15 KB file — should be accepted with 20 MB limit + writeFileSync(join(baseDir, "large-ok.txt"), "x".repeat(15000)); + const result = largeFns.readFile("large-ok.txt"); + expect(result.content).toBeDefined(); + expect(result.error).toBeUndefined(); + }); + }); }); diff --git a/tests/fs-write.test.ts b/tests/fs-write.test.ts index d61f4f7..62be833 100644 --- a/tests/fs-write.test.ts +++ b/tests/fs-write.test.ts @@ -560,4 +560,74 @@ describe("createHostFunctions", () => { } }); }); + + // ── configurable limits ────────────────────────────────────── + + describe("configurable limits", () => { + it("should enforce custom maxWriteChunkKb for text writes", () => { + const chunkDir = makeTempDir(); + try { + // 1 KB per-call limit + const chunkFns = createHostFunctions({ + baseDir: chunkDir, + maxWriteChunkKb: 1, + maxWriteSizeKb: 100, + })["fs-write"]; + // 2 KB content exceeds 1 KB per-call limit + const result = chunkFns.writeFile("big.txt", "x".repeat(2048)); + expect(result.error).toContain("per-call limit"); + } finally { + rmSync(chunkDir, { recursive: true, force: true }); + } + }); + + it("should enforce custom maxWriteChunkKb for binary writes", () => { + const chunkDir = makeTempDir(); + try { + const chunkFns = createHostFunctions({ + baseDir: chunkDir, + maxWriteChunkKb: 1, + maxWriteSizeKb: 100, + })["fs-write"]; + const data = new Uint8Array(2048); // 2 KB > 1 KB limit + expect(() => chunkFns.writeFileBinary("big.bin", data)).toThrow( + "per-call limit", + ); + } finally { + rmSync(chunkDir, { recursive: true, force: true }); + } + }); + + it("should allow writes within custom maxWriteChunkKb", () => { + const chunkDir = makeTempDir(); + try { + const chunkFns = createHostFunctions({ + baseDir: chunkDir, + maxWriteChunkKb: 4, + maxWriteSizeKb: 100, + })["fs-write"]; + const result = chunkFns.writeFile("ok.txt", "hello"); + expect(result.ok).toBe(true); + } finally { + rmSync(chunkDir, { recursive: true, force: true }); + } + }); + + it("should allow maxWriteSizeKb above the old 50 MB ceiling", () => { + const bigDir = makeTempDir(); + try { + // Verify the ceiling bug is fixed — 102400 should not be + // clamped to 51200 by the old MAX_SIZE_LIMIT_KB ceiling. + const bigFns = createHostFunctions({ + baseDir: bigDir, + maxWriteSizeKb: 102400, // 100 MB + })["fs-write"]; + // Write a small file — should be accepted + const result = bigFns.writeFile("ok.txt", "hello"); + expect(result.ok).toBe(true); + } finally { + rmSync(bigDir, { recursive: true, force: true }); + } + }); + }); });
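
Reviewer note on the clamping helper: `safeNumericConfig` itself is not part of this diff, so the call sites above are the only specification we see. The sketch below is a hypothetical reconstruction inferred from those call sites and comments — a `(value, default, ceiling, floor)` argument order, rejection of NaN/Infinity/negative inputs, a floor that defaults to 1 (the F-08 remediation has callers pass schema minimums explicitly), and `Number.MAX_SAFE_INTEGER` as the "no ceiling" sentinel. The actual implementation in the codebase may differ.

// Hypothetical sketch — NOT the helper shipped in this codebase.
// Inferred signature: safeNumericConfig(value, default, ceiling, floor).
function safeNumericConfig(
  value: unknown,
  defaultValue: number,
  ceiling: number,
  floor = 1, // F-08: callers pass manifest minimums explicitly
): number {
  // Reject non-numeric, NaN, Infinity, and negative inputs outright.
  if (typeof value !== "number" || !Number.isFinite(value) || value < 0) {
    return defaultValue;
  }
  // Clamp into [floor, ceiling]. Passing NO_CEIL = Number.MAX_SAFE_INTEGER
  // as the ceiling makes the upper bound a no-op, matching the diff's intent.
  return Math.min(Math.max(value, floor), ceiling);
}

// Example usage mirroring a call site above (cfg is a stand-in here):
const cfg: { maxRedirects?: unknown } = { maxRedirects: 10 };
const maxRedirects = safeNumericConfig(
  cfg.maxRedirects,
  5,
  Number.MAX_SAFE_INTEGER,
  0, // schema declares minimum: 0, so the floor must be passed as 0
);

Under these assumed semantics, a floor of 0 (maxRedirects, diskCacheMaxMb) still admits an explicit 0 while negative or non-finite values fall back to the default — consistent with the "Set to 0 to disable" descriptions above.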