diff --git a/.changeset/forward-fetch-headers-options.md b/.changeset/forward-fetch-headers-options.md new file mode 100644 index 000000000..b907a704f --- /dev/null +++ b/.changeset/forward-fetch-headers-options.md @@ -0,0 +1,5 @@ +--- +"@browserbasehq/stagehand": patch +--- + +fix: forward fetch and headers options to AI SDK providers to enable proxy authentication, request logging, and custom retry logic diff --git a/packages/core/examples/test-custom-fetch.ts b/packages/core/examples/test-custom-fetch.ts new file mode 100644 index 000000000..9ea6c9c50 --- /dev/null +++ b/packages/core/examples/test-custom-fetch.ts @@ -0,0 +1,82 @@ +import { Stagehand } from "../lib/v3"; + +/** + * Test script to verify custom fetch and headers are forwarded to AI SDK providers + * + * This demonstrates the fix for the bug where custom fetch functions and headers + * were being silently ignored when using AI SDK providers (e.g., "openai/gpt-4o-mini"). + * + * Expected behavior: + * - Custom fetch function should be called for all LLM API requests + * - Custom headers should be included in the requests + * - This enables use cases like: proxy authentication, request logging, retry logic + */ + +async function main() { + // Track if custom fetch was called + let fetchCallCount = 0; + const customHeaders: string[] = []; + + // Create custom fetch function + const customFetch: typeof fetch = async (url, options) => { + fetchCallCount++; + console.log(`✅ Custom fetch called (${fetchCallCount} times)`); + console.log(` URL: ${url}`); + + // Log custom headers if present + if (options?.headers) { + const headers = new Headers(options.headers); + headers.forEach((value, key) => { + if (key.toLowerCase().startsWith('x-custom')) { + customHeaders.push(`${key}: ${value}`); + console.log(` Custom header: ${key}: ${value}`); + } + }); + } + + return fetch(url, options); + }; + + // Initialize Stagehand with custom fetch and headers + console.log("Initializing Stagehand with custom fetch and 
headers...\n"); + + const stagehand = new Stagehand({ + model: { + modelName: "openai/gpt-4o-mini", + apiKey: process.env.OPENAI_API_KEY, + fetch: customFetch, + headers: { + "X-Custom-Header": "test-value", + "X-Custom-Proxy-Auth": "proxy-token-123" + } + } as any, + env: "LOCAL" + }); + + await stagehand.init(); + + try { + console.log("Making a simple LLM call via act()...\n"); + + // Navigate to a simple page + await stagehand.context.pages()[0].goto("https://example.com"); + + // Make an act() call that will use the LLM + await stagehand.act("find the heading on the page"); + + console.log("\n=== Test Results ==="); + if (fetchCallCount > 0) { + console.log(`✅ SUCCESS: Custom fetch was called ${fetchCallCount} times`); + console.log(`✅ Custom headers detected: ${customHeaders.length > 0 ? customHeaders.join(", ") : "None (may be overridden by SDK)"}`); + } else { + console.log("❌ FAILURE: Custom fetch was NOT called"); + console.log("   This indicates the bug still exists."); + } + } catch (error) { + console.error("\n❌ Error during test:", error); + } finally { + await stagehand.close(); + } +} + +main().catch(console.error); diff --git a/packages/core/lib/v3/llm/LLMProvider.ts b/packages/core/lib/v3/llm/LLMProvider.ts index 7c16f2118..fae3afa44 100644 --- a/packages/core/lib/v3/llm/LLMProvider.ts +++ b/packages/core/lib/v3/llm/LLMProvider.ts @@ -16,6 +16,11 @@ import { GoogleClient } from "./GoogleClient"; import { GroqClient } from "./GroqClient"; import { LLMClient } from "./LLMClient"; import { OpenAIClient } from "./OpenAIClient"; + +interface ExtendedClientOptions { + headers?: Record<string, string>; + fetch?: typeof globalThis.fetch; +} import { openai, createOpenAI } from "@ai-sdk/openai"; import { anthropic, createAnthropic } from "@ai-sdk/anthropic"; import { google, createGoogleGenerativeAI } from "@ai-sdk/google"; @@ -98,33 +103,43 @@ export function getAISDKLanguageModel( subModelName: string, apiKey?: string, baseURL?: string, + headers?: Record<string, string>, + fetch?: 
typeof globalThis.fetch, ) { + const creator = AISDKProvidersWithAPIKey[subProvider]; + if (!creator) { + throw new UnsupportedAISDKModelProviderError( + subProvider, + Object.keys(AISDKProvidersWithAPIKey), + ); + } + + // Build provider config - all fields are optional + // When apiKey is not provided, creator functions automatically use environment variables + const providerConfig: { + apiKey?: string; + baseURL?: string; + headers?: Record<string, string>; + fetch?: typeof globalThis.fetch; + } = {}; + if (apiKey) { - const creator = AISDKProvidersWithAPIKey[subProvider]; - if (!creator) { - throw new UnsupportedAISDKModelProviderError( - subProvider, - Object.keys(AISDKProvidersWithAPIKey), - ); - } - // Create the provider instance with the API key and baseURL if provided - const providerConfig: { apiKey: string; baseURL?: string } = { apiKey }; - if (baseURL) { - providerConfig.baseURL = baseURL; - } - const provider = creator(providerConfig); - // Get the specific model from the provider - return provider(subModelName); - } else { - const provider = AISDKProviders[subProvider]; - if (!provider) { - throw new UnsupportedAISDKModelProviderError( - subProvider, - Object.keys(AISDKProviders), - ); - } - return provider(subModelName); + providerConfig.apiKey = apiKey; } + if (baseURL) { + providerConfig.baseURL = baseURL; + } + if (headers) { + providerConfig.headers = headers; + } + if (fetch) { + providerConfig.fetch = fetch; + } + + // Type assertion needed: AI SDK types require apiKey, but runtime accepts optional apiKey + // At runtime, when apiKey is not provided, creators automatically use environment variables + const provider = creator(providerConfig as { apiKey: string }); + return provider(subModelName); } export class LLMProvider { @@ -148,6 +163,8 @@ export class LLMProvider { subModelName, clientOptions?.apiKey, clientOptions?.baseURL, + (clientOptions as ExtendedClientOptions)?.headers, + (clientOptions as ExtendedClientOptions)?.fetch, ); return new AISdkClient({