diff --git a/docs/adapters/grok.md b/docs/adapters/grok.md new file mode 100644 index 00000000..9e3afe4b --- /dev/null +++ b/docs/adapters/grok.md @@ -0,0 +1,232 @@ +--- +title: Grok (xAI) +id: grok-adapter +order: 5 +--- + +The Grok adapter provides access to xAI's Grok models, including Grok 4.1, Grok 4, Grok 3, and image generation with Grok 2 Image. + +## Installation + +```bash +npm install @tanstack/ai-grok +``` + +## Basic Usage + +```typescript +import { chat } from "@tanstack/ai"; +import { grokText } from "@tanstack/ai-grok"; + +const stream = chat({ + adapter: grokText("grok-4"), + messages: [{ role: "user", content: "Hello!" }], +}); +``` + +## Basic Usage - Custom API Key + +```typescript +import { chat } from "@tanstack/ai"; +import { createGrokText } from "@tanstack/ai-grok"; + +const adapter = createGrokText("grok-4", process.env.XAI_API_KEY!); + +const stream = chat({ + adapter, + messages: [{ role: "user", content: "Hello!" }], +}); +``` + +## Configuration + +```typescript +import { createGrokText, type GrokTextConfig } from "@tanstack/ai-grok"; + +const config: Omit = { + baseURL: "https://api.x.ai/v1", // Optional, this is the default +}; + +const adapter = createGrokText("grok-4", process.env.XAI_API_KEY!, config); +``` + +## Example: Chat Completion + +```typescript +import { chat, toStreamResponse } from "@tanstack/ai"; +import { grokText } from "@tanstack/ai-grok"; + +export async function POST(request: Request) { + const { messages } = await request.json(); + + const stream = chat({ + adapter: grokText("grok-4"), + messages, + }); + + return toStreamResponse(stream); +} +``` + +## Example: With Tools + +```typescript +import { chat, toolDefinition } from "@tanstack/ai"; +import { grokText } from "@tanstack/ai-grok"; +import { z } from "zod"; + +const getWeatherDef = toolDefinition({ + name: "get_weather", + description: "Get the current weather", + inputSchema: z.object({ + location: z.string(), + }), +}); + +const getWeather = getWeatherDef.server(async ({ location }) => { + // Fetch weather data + return { temperature: 72, conditions: "sunny" }; +}); + +const stream = chat({ + adapter: grokText("grok-4-1-fast-reasoning"), + messages, + tools: [getWeather], +}); +``` + +## Model Options + +Grok supports various provider-specific options: + +```typescript +const stream = chat({ + adapter: grokText("grok-4"), + messages, + modelOptions: { + frequency_penalty: 0.5, + presence_penalty: 0.5, + stop: ["END"], + }, +}); +``` + +## Summarization + +Summarize long text content: + +```typescript +import { summarize } from "@tanstack/ai"; +import { grokSummarize } from "@tanstack/ai-grok"; + +const result = await summarize({ + adapter: grokSummarize("grok-4"), + text: "Your long text to summarize...", + maxLength: 100, + style: "concise", // "concise" | "bullet-points" | "paragraph" +}); + +console.log(result.summary); +``` + +## Image Generation + +Generate images with Grok 2 Image: + +```typescript +import { generateImage } from "@tanstack/ai"; +import { grokImage } from "@tanstack/ai-grok"; + +const result = await generateImage({ + adapter: grokImage("grok-2-image-1212"), + prompt: "A futuristic cityscape at sunset", + numberOfImages: 1, +}); + +console.log(result.images); +``` + +## Environment Variables + +Set your API key in environment variables: + +```bash +XAI_API_KEY=xai-... 
+``` + +## Implementation Notes + +### Why Chat Completions API (Not Responses API) + +The Grok adapter uses xAI's **Chat Completions API** (`/v1/chat/completions`) rather than the Responses API (`/v1/responses`). This is an intentional architectural decision: + +1. **User-Defined Tools**: The Chat Completions API supports user-defined function tools, which is essential for TanStack AI's tool calling functionality. The Responses API only supports xAI's server-side tools (web search, X search, code execution). + +2. **Full Tool Calling Support**: With Chat Completions, you can define custom tools with Zod schemas that run in your application. The Responses API restricts you to xAI's built-in tools only. + +3. **Streaming Compatibility**: The streaming event format differs significantly between the two APIs. Chat Completions uses `delta.tool_calls` with argument accumulation, while Responses API uses `response.output_item.added` and `response.function_call_arguments.done`. + +4. **OpenAI SDK Compatibility**: xAI's Chat Completions API is fully compatible with the OpenAI SDK, making integration straightforward while maintaining full feature parity for tool calling. + +If you need xAI's server-side tools (web search, X/Twitter search, code execution), you would need to use the Responses API directly. However, for most use cases requiring custom tool definitions, the Chat Completions API is the correct choice. + +## API Reference + +### `grokText(model, config?)` + +Creates a Grok text adapter using environment variables. + +**Parameters:** + +- `model` - The model name (e.g., `'grok-4'`, `'grok-4-1-fast-reasoning'`) +- `config.baseURL?` - Custom base URL (optional) + +**Returns:** A Grok text adapter instance. + +### `createGrokText(model, apiKey, config?)` + +Creates a Grok text adapter with an explicit API key. + +**Parameters:** + +- `model` - The model name +- `apiKey` - Your xAI API key +- `config.baseURL?` - Custom base URL (optional) + +**Returns:** A Grok text adapter instance. + +### `grokSummarize(model, config?)` + +Creates a Grok summarization adapter using environment variables. + +**Returns:** A Grok summarize adapter instance. + +### `createGrokSummarize(model, apiKey, config?)` + +Creates a Grok summarization adapter with an explicit API key. + +**Returns:** A Grok summarize adapter instance. + +### `grokImage(model, config?)` + +Creates a Grok image generation adapter using environment variables. + +**Returns:** A Grok image adapter instance. + +### `createGrokImage(model, apiKey, config?)` + +Creates a Grok image generation adapter with an explicit API key. + +**Returns:** A Grok image adapter instance. + +## Limitations + +- **Text-to-Speech**: Grok does not support text-to-speech. Use OpenAI for TTS. +- **Transcription**: Grok does not support audio transcription. Use OpenAI's Whisper. +- **Responses API Tools**: Server-side tools (web search, X search, code execution) are not supported through this adapter. Use the Chat Completions API with custom tools instead. 
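+
+## Example: Streaming Tool Call Chunks
+
+As described in the implementation notes above, the Chat Completions API streams tool-call arguments as incremental `delta.tool_calls` fragments, and this adapter accumulates them and emits each completed call as a single `tool_call` chunk. The sketch below is a minimal illustration of consuming the adapter's `chatStream` directly to observe those chunks; the field names follow this adapter's `StreamChunk` output, and the snippet is a sketch rather than a complete API reference.
+
+```typescript
+import { grokText } from "@tanstack/ai-grok";
+
+const adapter = grokText("grok-4");
+
+for await (const chunk of adapter.chatStream({
+  model: "grok-4",
+  messages: [{ role: "user", content: "What's the weather in Paris?" }],
+  // Pass `tools` here (as in the tools example above) to see tool_call chunks
+})) {
+  if (chunk.type === "content") {
+    // `delta` is the newest text fragment; `content` is the accumulated text
+    process.stdout.write(chunk.delta);
+  } else if (chunk.type === "tool_call") {
+    // Emitted once a tool call's streamed arguments have been fully accumulated
+    console.log(chunk.toolCall.function.name, chunk.toolCall.function.arguments);
+  } else if (chunk.type === "done") {
+    console.log("finish reason:", chunk.finishReason, "usage:", chunk.usage);
+  }
+}
+```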
+ +## Next Steps + +- [Getting Started](../getting-started/quick-start) - Learn the basics +- [Tools Guide](../guides/tools) - Learn about tools +- [Other Adapters](./openai) - Explore other providers diff --git a/examples/ts-react-chat/package.json b/examples/ts-react-chat/package.json index eb33f5eb..f2ff15ef 100644 --- a/examples/ts-react-chat/package.json +++ b/examples/ts-react-chat/package.json @@ -14,6 +14,7 @@ "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-client": "workspace:*", "@tanstack/ai-gemini": "workspace:*", + "@tanstack/ai-grok": "workspace:*", "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-react": "workspace:*", diff --git a/examples/ts-react-chat/src/lib/model-selection.ts b/examples/ts-react-chat/src/lib/model-selection.ts index 0412d275..4d40ccc7 100644 --- a/examples/ts-react-chat/src/lib/model-selection.ts +++ b/examples/ts-react-chat/src/lib/model-selection.ts @@ -1,4 +1,4 @@ -export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'grok' export interface ModelOption { provider: Provider @@ -67,6 +67,23 @@ export const MODEL_OPTIONS: Array = [ model: 'smollm', label: 'Ollama - SmolLM', }, + + // Grok + { + provider: 'grok', + model: 'grok-3', + label: 'Grok - Grok 3', + }, + { + provider: 'grok', + model: 'grok-3-mini', + label: 'Grok - Grok 3 Mini', + }, + { + provider: 'grok', + model: 'grok-2-vision-1212', + label: 'Grok - Grok 2 Vision', + }, ] const STORAGE_KEY = 'tanstack-ai-model-preference' diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index da5c9c26..6b045f5f 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -9,6 +9,7 @@ import { openaiText } from '@tanstack/ai-openai' import { ollamaText } from '@tanstack/ai-ollama' import { anthropicText } from '@tanstack/ai-anthropic' import { geminiText } from '@tanstack/ai-gemini' +import { grokText } from '@tanstack/ai-grok' import type { AnyTextAdapter } from '@tanstack/ai' import { addToCartToolDef, @@ -18,7 +19,7 @@ import { recommendGuitarToolDef, } from '@/lib/guitar-tools' -type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'grok' const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. 
@@ -90,6 +91,10 @@ export const Route = createFileRoute('/api/tanchat')({
          (model || 'gemini-2.5-flash') as 'gemini-2.5-flash',
        ),
      }),
+    grok: () =>
+      createChatOptions({
+        adapter: grokText((model || 'grok-3') as 'grok-3'),
+      }),
     ollama: () =>
       createChatOptions({
         adapter: ollamaText((model || 'mistral:7b') as 'mistral:7b'),
diff --git a/packages/typescript/ai-grok/CHANGELOG.md b/packages/typescript/ai-grok/CHANGELOG.md
new file mode 100644
index 00000000..a6c60107
--- /dev/null
+++ b/packages/typescript/ai-grok/CHANGELOG.md
@@ -0,0 +1,7 @@
+# @tanstack/ai-grok
+
+## 0.0.3
+
+### Patch Changes
+
+- Initial release of Grok (xAI) adapter for TanStack AI
diff --git a/packages/typescript/ai-grok/README.md b/packages/typescript/ai-grok/README.md
new file mode 100644
index 00000000..b4f87f9f
--- /dev/null
+++ b/packages/typescript/ai-grok/README.md
@@ -0,0 +1,130 @@
+# @tanstack/ai-grok
+
+Grok (xAI) adapter for TanStack AI
+
+## Installation
+
+```bash
+npm install @tanstack/ai-grok
+# or
+pnpm add @tanstack/ai-grok
+# or
+yarn add @tanstack/ai-grok
+```
+
+## Setup
+
+Get your API key from [xAI Console](https://console.x.ai) and set it as an environment variable:
+
+```bash
+export XAI_API_KEY="xai-..."
+```
+
+## Usage
+
+### Text/Chat Adapter
+
+```typescript
+import { grokText } from '@tanstack/ai-grok'
+import { generate } from '@tanstack/ai'
+
+const adapter = grokText('grok-3')
+
+const result = await generate({
+  adapter,
+  model: 'grok-3',
+  messages: [
+    { role: 'user', content: 'Explain quantum computing in simple terms' },
+  ],
+})
+
+console.log(result.text)
+```
+
+### Summarization Adapter
+
+```typescript
+import { grokSummarize } from '@tanstack/ai-grok'
+import { summarize } from '@tanstack/ai'
+
+const adapter = grokSummarize('grok-3')
+
+const result = await summarize({
+  adapter,
+  model: 'grok-3',
+  text: 'Long article text...',
+  style: 'bullet-points',
+})
+
+console.log(result.summary)
+```
+
+### Image Generation Adapter
+
+```typescript
+import { grokImage } from '@tanstack/ai-grok'
+import { generateImages } from '@tanstack/ai'
+
+const adapter = grokImage('grok-2-image-1212')
+
+const result = await generateImages({
+  adapter,
+  model: 'grok-2-image-1212',
+  prompt: 'A beautiful sunset over mountains',
+  numberOfImages: 1,
+  size: '1024x1024',
+})
+
+console.log(result.images[0].url)
+```
+
+### With Explicit API Key
+
+```typescript
+import { createGrokText } from '@tanstack/ai-grok'
+
+const adapter = createGrokText('grok-3', 'xai-your-api-key-here')
+```
+
+## Supported Models
+
+### Chat Models
+
+- `grok-4` - Latest flagship model
+- `grok-3` - Previous generation model
+- `grok-3-mini` - Smaller, faster model
+- `grok-4-fast-reasoning` / `grok-4-fast-non-reasoning` - Fast inference models
+- `grok-4-1-fast-reasoning` / `grok-4-1-fast-non-reasoning` - Latest fast models
+- `grok-code-fast-1` - Coding-focused fast model
+- `grok-2-vision-1212` - Vision-capable model (text + image input)
+
+### Image Models
+
+- `grok-2-image-1212` - Image generation model
+
+## Features
+
+- ✅ Streaming chat completions
+- ✅ Structured output (JSON Schema)
+- ✅ Function/tool calling
+- ✅ Multimodal input (text + images for vision models)
+- ✅ Image generation
+- ✅ Text summarization
+- ❌ Embeddings (not supported by xAI)
+
+## Tree-Shakeable Adapters
+
+This package uses tree-shakeable adapters, so you only import what you need:
+
+```typescript
+// Only imports text adapter
+import { grokText } from '@tanstack/ai-grok'
+
+// Only imports image adapter
+import { grokImage } from '@tanstack/ai-grok'
+```
+
+This keeps your bundle size small!
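+
+## Multimodal Input (Vision Models)
+
+Vision-capable models such as `grok-2-vision-1212` accept image parts alongside text in a single user message. The sketch below shows roughly what such a message can look like. The exact `ContentPart` shape comes from `@tanstack/ai`; the `source` and `metadata` fields shown here are an assumption based on how this adapter maps image parts onto Chat Completions `image_url` entries, so adjust them to the actual type definitions.
+
+```typescript
+import { grokText } from '@tanstack/ai-grok'
+
+const adapter = grokText('grok-2-vision-1212')
+
+const messages = [
+  {
+    role: 'user',
+    content: [
+      { type: 'text', content: 'What guitar is shown in this photo?' },
+      {
+        type: 'image',
+        // `source.value` is forwarded as the image URL (the surrounding shape is assumed)
+        source: { type: 'url', value: 'https://example.com/guitar.jpg' },
+        // Maps to the Chat Completions `image_url.detail` option
+        metadata: { detail: 'high' },
+      },
+    ],
+  },
+]
+
+// Pass `adapter` and `messages` to the chat/generate call exactly as in the examples above.
+```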
+ +## License + +MIT diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json new file mode 100644 index 00000000..d59054cb --- /dev/null +++ b/packages/typescript/ai-grok/package.json @@ -0,0 +1,53 @@ +{ + "name": "@tanstack/ai-grok", + "version": "0.0.3", + "description": "Grok (xAI) adapter for TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-grok" + }, + "type": "module", + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "keywords": [ + "ai", + "grok", + "xai", + "tanstack", + "adapter" + ], + "dependencies": { + "openai": "^6.9.1" + }, + "devDependencies": { + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^", + "zod": "^4.0.0" + } +} diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts new file mode 100644 index 00000000..beb5dd18 --- /dev/null +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -0,0 +1,176 @@ +import { BaseImageAdapter } from '@tanstack/ai/adapters' +import { createGrokClient, generateId, getGrokApiKeyFromEnv } from '../utils' +import { + validateImageSize, + validateNumberOfImages, + validatePrompt, +} from '../image/image-provider-options' +import type { GROK_IMAGE_MODELS } from '../model-meta' +import type { + GrokImageModelProviderOptionsByName, + GrokImageModelSizeByName, + GrokImageProviderOptions, +} from '../image/image-provider-options' +import type { + GeneratedImage, + ImageGenerationOptions, + ImageGenerationResult, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' +import type { GrokClientConfig } from '../utils' + +/** + * Configuration for Grok image adapter + */ +export interface GrokImageConfig extends GrokClientConfig {} + +/** Model type for Grok Image */ +export type GrokImageModel = (typeof GROK_IMAGE_MODELS)[number] + +/** + * Grok Image Generation Adapter + * + * Tree-shakeable adapter for Grok image generation functionality. + * Supports grok-2-image-1212 model. 
+ *
+ * Features:
+ * - Model-specific type-safe provider options
+ * - Size validation per model
+ * - Number of images validation
+ */
+export class GrokImageAdapter<
+  TModel extends GrokImageModel,
+> extends BaseImageAdapter<
+  TModel,
+  GrokImageProviderOptions,
+  GrokImageModelProviderOptionsByName,
+  GrokImageModelSizeByName
+> {
+  readonly kind = 'image' as const
+  readonly name = 'grok' as const
+
+  private client: OpenAI_SDK
+
+  constructor(config: GrokImageConfig, model: TModel) {
+    super({}, model)
+    this.client = createGrokClient(config)
+  }
+
+  async generateImages(
+    options: ImageGenerationOptions,
+  ): Promise<ImageGenerationResult> {
+    const { model, prompt, numberOfImages, size } = options
+
+    // Validate inputs
+    validatePrompt({ prompt, model })
+    validateImageSize(model, size)
+    validateNumberOfImages(model, numberOfImages)
+
+    // Build request based on model type
+    const request = this.buildRequest(options)
+
+    const response = await this.client.images.generate({
+      ...request,
+      stream: false,
+    })
+
+    return this.transformResponse(model, response)
+  }
+
+  private buildRequest(
+    options: ImageGenerationOptions,
+  ): OpenAI_SDK.Images.ImageGenerateParams {
+    const { model, prompt, numberOfImages, size, modelOptions } = options
+
+    return {
+      model,
+      prompt,
+      n: numberOfImages ?? 1,
+      size: size as OpenAI_SDK.Images.ImageGenerateParams['size'],
+      ...modelOptions,
+    }
+  }
+
+  private transformResponse(
+    model: string,
+    response: OpenAI_SDK.Images.ImagesResponse,
+  ): ImageGenerationResult {
+    const images: Array<GeneratedImage> = (response.data ?? []).map((item) => ({
+      b64Json: item.b64_json,
+      url: item.url,
+      revisedPrompt: item.revised_prompt,
+    }))
+
+    return {
+      id: generateId(this.name),
+      model,
+      images,
+      usage: response.usage
+        ? {
+            inputTokens: response.usage.input_tokens,
+            outputTokens: response.usage.output_tokens,
+            totalTokens: response.usage.total_tokens,
+          }
+        : undefined,
+    }
+  }
+}
+
+/**
+ * Creates a Grok image adapter with explicit API key.
+ * Type resolution happens here at the call site.
+ *
+ * @param model - The model name (e.g., 'grok-2-image-1212')
+ * @param apiKey - Your xAI API key
+ * @param config - Optional additional configuration
+ * @returns Configured Grok image adapter instance with resolved types
+ *
+ * @example
+ * ```typescript
+ * const adapter = createGrokImage('grok-2-image-1212', "xai-...");
+ *
+ * const result = await generateImage({
+ *   adapter,
+ *   prompt: 'A cute baby sea otter'
+ * });
+ * ```
+ */
+export function createGrokImage<TModel extends GrokImageModel>(
+  model: TModel,
+  apiKey: string,
+  config?: Omit<GrokImageConfig, 'apiKey'>,
+): GrokImageAdapter<TModel> {
+  return new GrokImageAdapter({ apiKey, ...config }, model)
+}
+
+/**
+ * Creates a Grok image adapter with automatic API key detection from environment variables.
+ * Type resolution happens here at the call site.
+ *
+ * Looks for `XAI_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @param model - The model name (e.g., 'grok-2-image-1212')
+ * @param config - Optional configuration (excluding apiKey which is auto-detected)
+ * @returns Configured Grok image adapter instance with resolved types
+ * @throws Error if XAI_API_KEY is not found in environment
+ *
+ * @example
+ * ```typescript
+ * // Automatically uses XAI_API_KEY from environment
+ * const adapter = grokImage('grok-2-image-1212');
+ *
+ * const result = await generateImage({
+ *   adapter,
+ *   prompt: 'A beautiful sunset over mountains'
+ * });
+ * ```
+ */
+export function grokImage<TModel extends GrokImageModel>(
+  model: TModel,
+  config?: Omit<GrokImageConfig, 'apiKey'>,
+): GrokImageAdapter<TModel> {
+  const apiKey = getGrokApiKeyFromEnv()
+  return createGrokImage(model, apiKey, config)
+}
diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts
new file mode 100644
index 00000000..5cd273f0
--- /dev/null
+++ b/packages/typescript/ai-grok/src/adapters/summarize.ts
@@ -0,0 +1,174 @@
+import { BaseSummarizeAdapter } from '@tanstack/ai/adapters'
+import { getGrokApiKeyFromEnv } from '../utils'
+import { GrokTextAdapter } from './text'
+import type { GROK_CHAT_MODELS } from '../model-meta'
+import type {
+  StreamChunk,
+  SummarizationOptions,
+  SummarizationResult,
+} from '@tanstack/ai'
+import type { GrokClientConfig } from '../utils'
+
+/**
+ * Configuration for Grok summarize adapter
+ */
+export interface GrokSummarizeConfig extends GrokClientConfig {}
+
+/**
+ * Grok-specific provider options for summarization
+ */
+export interface GrokSummarizeProviderOptions {
+  /** Temperature for response generation (0-2) */
+  temperature?: number
+  /** Maximum tokens in the response */
+  maxTokens?: number
+}
+
+/** Model type for Grok summarization */
+export type GrokSummarizeModel = (typeof GROK_CHAT_MODELS)[number]
+
+/**
+ * Grok Summarize Adapter
+ *
+ * A thin wrapper around the text adapter that adds summarization-specific prompting.
+ * Delegates all API calls to the GrokTextAdapter.
+ */ +export class GrokSummarizeAdapter< + TModel extends GrokSummarizeModel, +> extends BaseSummarizeAdapter { + readonly kind = 'summarize' as const + readonly name = 'grok' as const + + private textAdapter: GrokTextAdapter + + constructor(config: GrokSummarizeConfig, model: TModel) { + super({}, model) + this.textAdapter = new GrokTextAdapter(config, model) + } + + async summarize(options: SummarizationOptions): Promise { + const systemPrompt = this.buildSummarizationPrompt(options) + + // Use the text adapter's streaming and collect the result + let summary = '' + let id = '' + let model = options.model + let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } + + for await (const chunk of this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + })) { + if (chunk.type === 'content') { + summary = chunk.content + id = chunk.id + model = chunk.model + } + if (chunk.type === 'done' && chunk.usage) { + usage = chunk.usage + } + } + + return { id, model, summary, usage } + } + + async *summarizeStream( + options: SummarizationOptions, + ): AsyncIterable { + const systemPrompt = this.buildSummarizationPrompt(options) + + // Delegate directly to the text adapter's streaming + yield* this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + }) + } + + private buildSummarizationPrompt(options: SummarizationOptions): string { + let prompt = 'You are a professional summarizer. ' + + switch (options.style) { + case 'bullet-points': + prompt += 'Provide a summary in bullet point format. ' + break + case 'paragraph': + prompt += 'Provide a summary in paragraph format. ' + break + case 'concise': + prompt += 'Provide a very concise summary in 1-2 sentences. ' + break + default: + prompt += 'Provide a clear and concise summary. ' + } + + if (options.focus && options.focus.length > 0) { + prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` + } + + if (options.maxLength) { + prompt += `Keep the summary under ${options.maxLength} tokens. ` + } + + return prompt + } +} + +/** + * Creates a Grok summarize adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'grok-3', 'grok-4') + * @param apiKey - Your xAI API key + * @param config - Optional additional configuration + * @returns Configured Grok summarize adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createGrokSummarize('grok-3', "xai-..."); + * ``` + */ +export function createGrokSummarize( + model: TModel, + apiKey: string, + config?: Omit, +): GrokSummarizeAdapter { + return new GrokSummarizeAdapter({ apiKey, ...config }, model) +} + +/** + * Creates a Grok summarize adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. 
+ *
+ * Looks for `XAI_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @param model - The model name (e.g., 'grok-3', 'grok-4')
+ * @param config - Optional configuration (excluding apiKey which is auto-detected)
+ * @returns Configured Grok summarize adapter instance with resolved types
+ * @throws Error if XAI_API_KEY is not found in environment
+ *
+ * @example
+ * ```typescript
+ * // Automatically uses XAI_API_KEY from environment
+ * const adapter = grokSummarize('grok-3');
+ *
+ * await summarize({
+ *   adapter,
+ *   text: "Long article text..."
+ * });
+ * ```
+ */
+export function grokSummarize<TModel extends GrokSummarizeModel>(
+  model: TModel,
+  config?: Omit<GrokSummarizeConfig, 'apiKey'>,
+): GrokSummarizeAdapter<TModel> {
+  const apiKey = getGrokApiKeyFromEnv()
+  return createGrokSummarize(model, apiKey, config)
+}
diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts
new file mode 100644
index 00000000..bef2ffaf
--- /dev/null
+++ b/packages/typescript/ai-grok/src/adapters/text.ts
@@ -0,0 +1,506 @@
+import { BaseTextAdapter } from '@tanstack/ai/adapters'
+import { validateTextProviderOptions } from '../text/text-provider-options'
+import { convertToolsToProviderFormat } from '../tools'
+import {
+  createGrokClient,
+  generateId,
+  getGrokApiKeyFromEnv,
+  makeGrokStructuredOutputCompatible,
+  transformNullsToUndefined,
+} from '../utils'
+import type {
+  GROK_CHAT_MODELS,
+  ResolveInputModalities,
+  ResolveProviderOptions,
+} from '../model-meta'
+import type {
+  StructuredOutputOptions,
+  StructuredOutputResult,
+} from '@tanstack/ai/adapters'
+import type OpenAI_SDK from 'openai'
+import type {
+  ContentPart,
+  ModelMessage,
+  StreamChunk,
+  TextOptions,
+} from '@tanstack/ai'
+import type { InternalTextProviderOptions } from '../text/text-provider-options'
+import type {
+  GrokImageMetadata,
+  GrokMessageMetadataByModality,
+} from '../message-types'
+import type { GrokClientConfig } from '../utils'
+
+/**
+ * Configuration for Grok text adapter
+ */
+export interface GrokTextConfig extends GrokClientConfig {}
+
+/**
+ * Alias for TextProviderOptions for external use
+ */
+export type { ExternalTextProviderOptions as GrokTextProviderOptions } from '../text/text-provider-options'
+
+/**
+ * Grok Text (Chat) Adapter
+ *
+ * Tree-shakeable adapter for Grok chat/text completion functionality.
+ * Uses OpenAI-compatible Chat Completions API (not Responses API).
+ */ +export class GrokTextAdapter< + TModel extends (typeof GROK_CHAT_MODELS)[number], +> extends BaseTextAdapter< + TModel, + ResolveProviderOptions, + ResolveInputModalities, + GrokMessageMetadataByModality +> { + readonly kind = 'text' as const + readonly name = 'grok' as const + + private client: OpenAI_SDK + + constructor(config: GrokTextConfig, model: TModel) { + super({}, model) + this.client = createGrokClient(config) + } + + async *chatStream( + options: TextOptions>, + ): AsyncIterable { + const requestParams = this.mapTextOptionsToGrok(options) + + try { + const stream = await this.client.chat.completions.create({ + ...requestParams, + stream: true, + }) + + yield* this.processGrokStreamChunks(stream, options) + } catch (error: unknown) { + const err = error as Error + console.error('>>> chatStream: Fatal error during response creation <<<') + console.error('>>> Error message:', err.message) + console.error('>>> Error stack:', err.stack) + console.error('>>> Full error:', err) + throw error + } + } + + /** + * Generate structured output using Grok's JSON Schema response format. + * Uses stream: false to get the complete response in one call. + * + * Grok has strict requirements for structured output (via OpenAI-compatible API): + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for all objects + * + * The outputSchema is already JSON Schema (converted in the ai layer). + * We apply Grok-specific transformations for structured output compatibility. + */ + async structuredOutput( + options: StructuredOutputOptions>, + ): Promise> { + const { chatOptions, outputSchema } = options + const requestParams = this.mapTextOptionsToGrok(chatOptions) + + // Apply Grok-specific transformations for structured output compatibility + const jsonSchema = makeGrokStructuredOutputCompatible( + outputSchema, + outputSchema.required || [], + ) + + try { + const response = await this.client.chat.completions.create({ + ...requestParams, + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, + }, + }) + + // Extract text content from the response + const rawText = response.choices[0]?.message.content || '' + + // Parse the JSON response + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' 
: ''}`, + ) + } + + // Transform null values to undefined to match original Zod schema expectations + // Grok returns null for optional fields we made nullable in the schema + const transformed = transformNullsToUndefined(parsed) + + return { + data: transformed, + rawText, + } + } catch (error: unknown) { + const err = error as Error + console.error('>>> structuredOutput: Error during response creation <<<') + console.error('>>> Error message:', err.message) + throw error + } + } + + private async *processGrokStreamChunks( + stream: AsyncIterable, + options: TextOptions, + ): AsyncIterable { + let accumulatedContent = '' + const timestamp = Date.now() + let responseId = generateId(this.name) + + // Track tool calls being streamed (arguments come in chunks) + const toolCallsInProgress = new Map< + number, + { + id: string + name: string + arguments: string + } + >() + + try { + for await (const chunk of stream) { + responseId = chunk.id || responseId + const choice = chunk.choices[0] + + if (!choice) continue + + const delta = choice.delta + const deltaContent = delta.content + const deltaToolCalls = delta.tool_calls + + // Handle content delta + if (deltaContent) { + accumulatedContent += deltaContent + yield { + type: 'content', + id: responseId, + model: chunk.model || options.model, + timestamp, + delta: deltaContent, + content: accumulatedContent, + role: 'assistant', + } + } + + // Handle tool calls - they come in as deltas + if (deltaToolCalls) { + for (const toolCallDelta of deltaToolCalls) { + const index = toolCallDelta.index + + // Initialize or update the tool call in progress + if (!toolCallsInProgress.has(index)) { + toolCallsInProgress.set(index, { + id: toolCallDelta.id || '', + name: toolCallDelta.function?.name || '', + arguments: '', + }) + } + + const toolCall = toolCallsInProgress.get(index)! + + // Update with any new data from the delta + if (toolCallDelta.id) { + toolCall.id = toolCallDelta.id + } + if (toolCallDelta.function?.name) { + toolCall.name = toolCallDelta.function.name + } + if (toolCallDelta.function?.arguments) { + toolCall.arguments += toolCallDelta.function.arguments + } + } + } + + // Handle finish reason + if (choice.finish_reason) { + // Emit all completed tool calls + if ( + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ) { + for (const [index, toolCall] of toolCallsInProgress) { + yield { + type: 'tool_call', + id: responseId, + model: chunk.model || options.model, + timestamp, + index, + toolCall: { + id: toolCall.id, + type: 'function', + function: { + name: toolCall.name, + arguments: toolCall.arguments, + }, + }, + } + } + } + + yield { + type: 'done', + id: responseId, + model: chunk.model || options.model, + timestamp, + usage: chunk.usage + ? { + promptTokens: chunk.usage.prompt_tokens || 0, + completionTokens: chunk.usage.completion_tokens || 0, + totalTokens: chunk.usage.total_tokens || 0, + } + : undefined, + finishReason: + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ? 
'tool_calls' + : 'stop', + } + } + } + } catch (error: unknown) { + const err = error as Error & { code?: string } + console.log('[Grok Adapter] Stream ended with error:', err.message) + yield { + type: 'error', + id: responseId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error occurred', + code: err.code, + }, + } + } + } + + /** + * Maps common options to Grok-specific Chat Completions format + */ + private mapTextOptionsToGrok( + options: TextOptions, + ): OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming { + const modelOptions = options.modelOptions as + | Omit< + InternalTextProviderOptions, + 'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p' + > + | undefined + + if (modelOptions) { + validateTextProviderOptions({ + ...modelOptions, + model: options.model, + }) + } + + const tools = options.tools + ? convertToolsToProviderFormat(options.tools) + : undefined + + // Build messages array with system prompts + const messages: Array = + [] + + // Add system prompts first + if (options.systemPrompts && options.systemPrompts.length > 0) { + messages.push({ + role: 'system', + content: options.systemPrompts.join('\n'), + }) + } + + // Convert messages + for (const message of options.messages) { + messages.push(this.convertMessageToGrok(message)) + } + + return { + model: options.model, + messages, + temperature: options.temperature, + max_tokens: options.maxTokens, + top_p: options.topP, + tools: tools as Array, + stream: true, + stream_options: { include_usage: true }, + } + } + + private convertMessageToGrok( + message: ModelMessage, + ): OpenAI_SDK.Chat.Completions.ChatCompletionMessageParam { + // Handle tool messages + if (message.role === 'tool') { + return { + role: 'tool', + tool_call_id: message.toolCallId || '', + content: + typeof message.content === 'string' + ? message.content + : JSON.stringify(message.content), + } + } + + // Handle assistant messages + if (message.role === 'assistant') { + const toolCalls = message.toolCalls?.map((tc) => ({ + id: tc.id, + type: 'function' as const, + function: { + name: tc.function.name, + arguments: + typeof tc.function.arguments === 'string' + ? tc.function.arguments + : JSON.stringify(tc.function.arguments), + }, + })) + + return { + role: 'assistant', + content: this.extractTextContent(message.content), + ...(toolCalls && toolCalls.length > 0 ? { tool_calls: toolCalls } : {}), + } + } + + // Handle user messages - support multimodal content + const contentParts = this.normalizeContent(message.content) + + // If only text, use simple string format + if (contentParts.length === 1 && contentParts[0]?.type === 'text') { + return { + role: 'user', + content: contentParts[0].content, + } + } + + // Otherwise, use array format for multimodal + const parts: Array = + [] + for (const part of contentParts) { + if (part.type === 'text') { + parts.push({ type: 'text', text: part.content }) + } else if (part.type === 'image') { + const imageMetadata = part.metadata as GrokImageMetadata | undefined + parts.push({ + type: 'image_url', + image_url: { + url: part.source.value, + detail: imageMetadata?.detail || 'auto', + }, + }) + } + } + + return { + role: 'user', + content: parts.length > 0 ? parts : '', + } + } + + /** + * Normalizes message content to an array of ContentPart. + * Handles backward compatibility with string content. 
+ */ + private normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] + } + if (typeof content === 'string') { + return [{ type: 'text', content: content }] + } + return content + } + + /** + * Extracts text content from a content value that may be string, null, or ContentPart array. + */ + private extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' + } + if (typeof content === 'string') { + return content + } + // It's an array of ContentPart + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') + } +} + +/** + * Creates a Grok text adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'grok-3', 'grok-4') + * @param apiKey - Your xAI API key + * @param config - Optional additional configuration + * @returns Configured Grok text adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createGrokText('grok-3', "xai-..."); + * // adapter has type-safe providerOptions for grok-3 + * ``` + */ +export function createGrokText< + TModel extends (typeof GROK_CHAT_MODELS)[number], +>( + model: TModel, + apiKey: string, + config?: Omit, +): GrokTextAdapter { + return new GrokTextAdapter({ apiKey, ...config }, model) +} + +/** + * Creates a Grok text adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. + * + * Looks for `XAI_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'grok-3', 'grok-4') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured Grok text adapter instance with resolved types + * @throws Error if XAI_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses XAI_API_KEY from environment + * const adapter = grokText('grok-3'); + * + * const stream = chat({ + * adapter, + * messages: [{ role: "user", content: "Hello!" }] + * }); + * ``` + */ +export function grokText( + model: TModel, + config?: Omit, +): GrokTextAdapter { + const apiKey = getGrokApiKeyFromEnv() + return createGrokText(model, apiKey, config) +} diff --git a/packages/typescript/ai-grok/src/image/image-provider-options.ts b/packages/typescript/ai-grok/src/image/image-provider-options.ts new file mode 100644 index 00000000..9b0d9ee5 --- /dev/null +++ b/packages/typescript/ai-grok/src/image/image-provider-options.ts @@ -0,0 +1,118 @@ +/** + * Grok Image Generation Provider Options + * + * These are provider-specific options for Grok image generation. + * Grok uses the grok-2-image-1212 model for image generation. + */ + +/** + * Supported sizes for grok-2-image-1212 model + */ +export type GrokImageSize = '1024x1024' | '1536x1024' | '1024x1536' + +/** + * Base provider options for Grok image models + */ +export interface GrokImageBaseProviderOptions { + /** + * A unique identifier representing your end-user. + * Can help xAI to monitor and detect abuse. + */ + user?: string +} + +/** + * Provider options for grok-2-image-1212 model + */ +export interface GrokImageProviderOptions extends GrokImageBaseProviderOptions { + /** + * The quality of the image. + * @default 'standard' + */ + quality?: 'standard' | 'hd' + + /** + * The format in which generated images are returned. + * URLs are only valid for 60 minutes after generation. 
+ * @default 'url' + */ + response_format?: 'url' | 'b64_json' +} + +/** + * Type-only map from model name to its specific provider options. + */ +export type GrokImageModelProviderOptionsByName = { + 'grok-2-image-1212': GrokImageProviderOptions +} + +/** + * Type-only map from model name to its supported sizes. + */ +export type GrokImageModelSizeByName = { + 'grok-2-image-1212': GrokImageSize +} + +/** + * Internal options interface for validation + */ +interface ImageValidationOptions { + prompt: string + model: string +} + +/** + * Validates that the provided size is supported by the model. + * Throws a descriptive error if the size is not supported. + */ +export function validateImageSize( + model: string, + size: string | undefined, +): void { + if (!size) return + + const validSizes: Record> = { + 'grok-2-image-1212': ['1024x1024', '1536x1024', '1024x1536'], + } + + const modelSizes = validSizes[model] + if (!modelSizes) { + throw new Error(`Unknown image model: ${model}`) + } + + if (!modelSizes.includes(size)) { + throw new Error( + `Size "${size}" is not supported by model "${model}". ` + + `Supported sizes: ${modelSizes.join(', ')}`, + ) + } +} + +/** + * Validates that the number of images is within bounds for the model. + */ +export function validateNumberOfImages( + _model: string, + numberOfImages: number | undefined, +): void { + if (numberOfImages === undefined) return + + // grok-2-image-1212 supports 1-10 images per request + if (numberOfImages < 1 || numberOfImages > 10) { + throw new Error( + `Number of images must be between 1 and 10. Requested: ${numberOfImages}`, + ) + } +} + +export const validatePrompt = (options: ImageValidationOptions) => { + if (options.prompt.length === 0) { + throw new Error('Prompt cannot be empty.') + } + // Grok image model supports up to 4000 characters + if (options.prompt.length > 4000) { + throw new Error( + 'For grok-2-image-1212, prompt length must be less than or equal to 4000 characters.', + ) + } +} diff --git a/packages/typescript/ai-grok/src/index.ts b/packages/typescript/ai-grok/src/index.ts new file mode 100644 index 00000000..1002a5dc --- /dev/null +++ b/packages/typescript/ai-grok/src/index.ts @@ -0,0 +1,55 @@ +// ============================================================================ +// New Tree-Shakeable Adapters (Recommended) +// ============================================================================ + +// Text (Chat) adapter - for chat/text completion +export { + GrokTextAdapter, + createGrokText, + grokText, + type GrokTextConfig, + type GrokTextProviderOptions, +} from './adapters/text' + +// Summarize adapter - for text summarization +export { + GrokSummarizeAdapter, + createGrokSummarize, + grokSummarize, + type GrokSummarizeConfig, + type GrokSummarizeProviderOptions, + type GrokSummarizeModel, +} from './adapters/summarize' + +// Image adapter - for image generation +export { + GrokImageAdapter, + createGrokImage, + grokImage, + type GrokImageConfig, + type GrokImageModel, +} from './adapters/image' +export type { + GrokImageProviderOptions, + GrokImageModelProviderOptionsByName, +} from './image/image-provider-options' + +// ============================================================================ +// Type Exports +// ============================================================================ + +export type { + GrokChatModelProviderOptionsByName, + GrokModelInputModalitiesByName, + ResolveProviderOptions, + ResolveInputModalities, +} from './model-meta' +export { GROK_CHAT_MODELS, GROK_IMAGE_MODELS 
} from './model-meta' +export type { + GrokTextMetadata, + GrokImageMetadata, + GrokAudioMetadata, + GrokVideoMetadata, + GrokDocumentMetadata, + GrokMessageMetadataByModality, +} from './message-types' diff --git a/packages/typescript/ai-grok/src/message-types.ts b/packages/typescript/ai-grok/src/message-types.ts new file mode 100644 index 00000000..ec9e7b18 --- /dev/null +++ b/packages/typescript/ai-grok/src/message-types.ts @@ -0,0 +1,67 @@ +/** + * Grok-specific metadata types for multimodal content parts. + * These types extend the base ContentPart metadata with Grok-specific options. + * + * Grok uses an OpenAI-compatible API, so metadata types are similar to OpenAI. + * + * @see https://docs.x.ai + */ + +/** + * Metadata for Grok image content parts. + * Controls how the model processes and analyzes images. + */ +export interface GrokImageMetadata { + /** + * Controls how the model processes the image. + * - 'auto': Let the model decide based on image size and content + * - 'low': Use low resolution processing (faster, cheaper, less detail) + * - 'high': Use high resolution processing (slower, more expensive, more detail) + * + * @default 'auto' + */ + detail?: 'auto' | 'low' | 'high' +} + +/** + * Metadata for Grok audio content parts. + * Specifies the audio format for proper processing. + */ +export interface GrokAudioMetadata { + /** + * The format of the audio. + * Supported formats: mp3, wav, flac, etc. + * @default 'mp3' + */ + format?: 'mp3' | 'wav' | 'flac' | 'ogg' | 'webm' | 'aac' +} + +/** + * Metadata for Grok video content parts. + * Note: Video support in Grok is limited; check current API capabilities. + */ +export interface GrokVideoMetadata {} + +/** + * Metadata for Grok document content parts. + * Note: Direct document support may vary; PDFs often need to be converted to images. + */ +export interface GrokDocumentMetadata {} + +/** + * Metadata for Grok text content parts. + * Currently no specific metadata options for text in Grok. + */ +export interface GrokTextMetadata {} + +/** + * Map of modality types to their Grok-specific metadata types. + * Used for type inference when constructing multimodal messages. 
+ */ +export interface GrokMessageMetadataByModality { + text: GrokTextMetadata + image: GrokImageMetadata + audio: GrokAudioMetadata + video: GrokVideoMetadata + document: GrokDocumentMetadata +} diff --git a/packages/typescript/ai-grok/src/model-meta.ts b/packages/typescript/ai-grok/src/model-meta.ts new file mode 100644 index 00000000..c097d9ba --- /dev/null +++ b/packages/typescript/ai-grok/src/model-meta.ts @@ -0,0 +1,298 @@ +/** + * Model metadata interface for documentation and type inference + */ +interface ModelMeta { + name: string + supports: { + input: Array<'text' | 'image' | 'audio' | 'video' | 'document'> + output: Array<'text' | 'image' | 'audio' | 'video'> + capabilities?: Array<'reasoning' | 'tool_calling' | 'structured_outputs'> + } + max_input_tokens?: number + max_output_tokens?: number + context_window?: number + knowledge_cutoff?: string + pricing?: { + input: { + normal: number + cached?: number + } + output: { + normal: number + } + } +} + +const GROK_4_1_FAST_REASONING = { + name: 'grok-4-1-fast-reasoning', + context_window: 2_000_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_4_1_FAST_NON_REASONING = { + name: 'grok-4-1-fast-non-reasoning', + context_window: 2_000_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_CODE_FAST_1 = { + name: 'grok-code-fast-1', + context_window: 256_000, + supports: { + input: ['text'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.02, + }, + output: { + normal: 1.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_4_FAST_REASONING = { + name: 'grok-4-fast-reasoning', + context_window: 2_000_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_4_FAST_NON_REASONING = { + name: 'grok-4-fast-non-reasoning', + context_window: 2_000_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_4 = { + name: 'grok-4', + context_window: 256_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 3, + cached: 0.75, + }, + output: { + normal: 15, + }, + }, +} as const satisfies ModelMeta + +const GROK_3_MINI = { + name: 'grok-3-mini', + context_window: 131_072, + supports: { + input: ['text'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.3, + cached: 0.075, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_3 = { + name: 'grok-3', + context_window: 131_072, + supports: { + input: ['text'], + output: ['text'], + 
capabilities: ['structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 3, + cached: 0.75, + }, + output: { + normal: 15, + }, + }, +} as const satisfies ModelMeta + +const GROK_2_VISION = { + name: 'grok-2-vision-1212', + context_window: 32_768, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 2, + }, + output: { + normal: 10, + }, + }, +} as const satisfies ModelMeta + +const GROK_2_IMAGE = { + name: 'grok-2-image-1212', + supports: { + input: ['text'], + output: ['image'], + }, + pricing: { + input: { + normal: 0.07, + }, + output: { + normal: 0.07, + }, + }, +} as const satisfies ModelMeta + +/** + * Grok Chat Models + * Based on xAI's available models as of 2025 + */ +export const GROK_CHAT_MODELS = [ + GROK_4_1_FAST_REASONING.name, + GROK_4_1_FAST_NON_REASONING.name, + GROK_CODE_FAST_1.name, + GROK_4_FAST_REASONING.name, + GROK_4_FAST_NON_REASONING.name, + GROK_4.name, + GROK_3.name, + GROK_3_MINI.name, + GROK_2_VISION.name, +] as const + +/** + * Grok Image Generation Models + */ +export const GROK_IMAGE_MODELS = [GROK_2_IMAGE.name] as const + +/** + * Type-only map from Grok chat model name to its supported input modalities. + * Used for type inference when constructing multimodal messages. + */ +export type GrokModelInputModalitiesByName = { + [GROK_4_1_FAST_REASONING.name]: typeof GROK_4_1_FAST_REASONING.supports.input + [GROK_4_1_FAST_NON_REASONING.name]: typeof GROK_4_1_FAST_NON_REASONING.supports.input + [GROK_CODE_FAST_1.name]: typeof GROK_CODE_FAST_1.supports.input + [GROK_4_FAST_REASONING.name]: typeof GROK_4_FAST_REASONING.supports.input + [GROK_4_FAST_NON_REASONING.name]: typeof GROK_4_FAST_NON_REASONING.supports.input + [GROK_4.name]: typeof GROK_4.supports.input + [GROK_3.name]: typeof GROK_3.supports.input + [GROK_3_MINI.name]: typeof GROK_3_MINI.supports.input + [GROK_2_VISION.name]: typeof GROK_2_VISION.supports.input +} + +/** + * Type-only map from Grok chat model name to its provider options type. + * Since Grok uses OpenAI-compatible API, we reuse OpenAI provider options. + */ +export type GrokChatModelProviderOptionsByName = { + [K in (typeof GROK_CHAT_MODELS)[number]]: GrokProviderOptions +} + +/** + * Grok-specific provider options + * Based on OpenAI-compatible API options + */ +export interface GrokProviderOptions { + /** Temperature for response generation (0-2) */ + temperature?: number + /** Maximum tokens in the response */ + max_tokens?: number + /** Top-p sampling parameter */ + top_p?: number + /** Frequency penalty (-2.0 to 2.0) */ + frequency_penalty?: number + /** Presence penalty (-2.0 to 2.0) */ + presence_penalty?: number + /** Stop sequences */ + stop?: string | Array + /** A unique identifier representing your end-user */ + user?: string +} + +// =========================== +// Type Resolution Helpers +// =========================== + +/** + * Resolve provider options for a specific model. + * If the model has explicit options in the map, use those; otherwise use base options. + */ +export type ResolveProviderOptions = + TModel extends keyof GrokChatModelProviderOptionsByName + ? GrokChatModelProviderOptionsByName[TModel] + : GrokProviderOptions + +/** + * Resolve input modalities for a specific model. + * If the model has explicit modalities in the map, use those; otherwise use text only. + */ +export type ResolveInputModalities = + TModel extends keyof GrokModelInputModalitiesByName + ? 
GrokModelInputModalitiesByName[TModel] + : readonly ['text'] diff --git a/packages/typescript/ai-grok/src/text/text-provider-options.ts b/packages/typescript/ai-grok/src/text/text-provider-options.ts new file mode 100644 index 00000000..a05222ff --- /dev/null +++ b/packages/typescript/ai-grok/src/text/text-provider-options.ts @@ -0,0 +1,77 @@ +import type { FunctionTool } from '../tools/function-tool' + +/** + * Grok Text Provider Options + * + * Grok uses an OpenAI-compatible Chat Completions API. + * However, not all OpenAI features may be supported by Grok. + */ + +/** + * Base provider options for Grok text/chat models + */ +export interface GrokBaseOptions { + /** + * A unique identifier representing your end-user. + * Can help xAI to monitor and detect abuse. + */ + user?: string +} + +/** + * Grok-specific provider options for text/chat + * Based on OpenAI-compatible API options + */ +export interface GrokTextProviderOptions extends GrokBaseOptions { + /** + * Temperature for response generation (0-2) + * Higher values make output more random, lower values more focused + */ + temperature?: number + /** + * Top-p sampling parameter (0-1) + * Alternative to temperature, nucleus sampling + */ + top_p?: number + /** + * Maximum tokens in the response + */ + max_tokens?: number + /** + * Frequency penalty (-2.0 to 2.0) + */ + frequency_penalty?: number + /** + * Presence penalty (-2.0 to 2.0) + */ + presence_penalty?: number + /** + * Stop sequences + */ + stop?: string | Array +} + +/** + * Internal options interface for validation + * Used internally by the adapter + */ +export interface InternalTextProviderOptions extends GrokTextProviderOptions { + model: string + stream?: boolean + tools?: Array +} + +/** + * External provider options (what users pass in) + */ +export type ExternalTextProviderOptions = GrokTextProviderOptions + +/** + * Validates text provider options + */ +export function validateTextProviderOptions( + _options: InternalTextProviderOptions, +): void { + // Basic validation can be added here if needed + // For now, Grok API will handle validation +} diff --git a/packages/typescript/ai-grok/src/tools/function-tool.ts b/packages/typescript/ai-grok/src/tools/function-tool.ts new file mode 100644 index 00000000..646fb895 --- /dev/null +++ b/packages/typescript/ai-grok/src/tools/function-tool.ts @@ -0,0 +1,45 @@ +import { makeGrokStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' +import type OpenAI from 'openai' + +// Use Chat Completions API tool format (not Responses API) +export type FunctionTool = OpenAI.Chat.Completions.ChatCompletionTool + +/** + * Converts a standard Tool to Grok ChatCompletionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply Grok-specific transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + * + * This enables strict mode for all tools automatically. + */ +export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { + // Tool schemas are already converted to JSON Schema in the ai layer + // Apply Grok-specific transformations for strict mode + const inputSchema = (tool.inputSchema ?? 
{ + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + const jsonSchema = makeGrokStructuredOutputCompatible( + inputSchema, + inputSchema.required || [], + ) + + // Ensure additionalProperties is false for strict mode + jsonSchema.additionalProperties = false + + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, // Always use strict mode since our schema converter handles the requirements + }, + } satisfies FunctionTool +} diff --git a/packages/typescript/ai-grok/src/tools/index.ts b/packages/typescript/ai-grok/src/tools/index.ts new file mode 100644 index 00000000..c9033415 --- /dev/null +++ b/packages/typescript/ai-grok/src/tools/index.ts @@ -0,0 +1,5 @@ +export { + convertFunctionToolToAdapterFormat, + type FunctionTool, +} from './function-tool' +export { convertToolsToProviderFormat } from './tool-converter' diff --git a/packages/typescript/ai-grok/src/tools/tool-converter.ts b/packages/typescript/ai-grok/src/tools/tool-converter.ts new file mode 100644 index 00000000..969fdb72 --- /dev/null +++ b/packages/typescript/ai-grok/src/tools/tool-converter.ts @@ -0,0 +1,17 @@ +import { convertFunctionToolToAdapterFormat } from './function-tool' +import type { FunctionTool } from './function-tool' +import type { Tool } from '@tanstack/ai' + +/** + * Converts an array of standard Tools to Grok-specific format + * Grok uses OpenAI-compatible API, so we primarily support function tools + */ +export function convertToolsToProviderFormat( + tools: Array, +): Array { + return tools.map((tool) => { + // For Grok, all tools are converted as function tools + // Grok uses OpenAI-compatible API which primarily supports function tools + return convertFunctionToolToAdapterFormat(tool) + }) +} diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts new file mode 100644 index 00000000..2a559076 --- /dev/null +++ b/packages/typescript/ai-grok/src/utils/client.ts @@ -0,0 +1,45 @@ +import OpenAI_SDK from 'openai' + +export interface GrokClientConfig { + apiKey: string + baseURL?: string +} + +/** + * Creates a Grok SDK client instance using OpenAI SDK with xAI's base URL + */ +export function createGrokClient(config: GrokClientConfig): OpenAI_SDK { + return new OpenAI_SDK({ + apiKey: config.apiKey, + baseURL: config.baseURL || 'https://api.x.ai/v1', + }) +} + +/** + * Gets Grok API key from environment variables + * @throws Error if XAI_API_KEY is not found + */ +export function getGrokApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.XAI_API_KEY + + if (!key) { + throw new Error( + 'XAI_API_KEY is required. 
diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts
new file mode 100644
index 00000000..2a559076
--- /dev/null
+++ b/packages/typescript/ai-grok/src/utils/client.ts
@@ -0,0 +1,45 @@
+import OpenAI_SDK from 'openai'
+
+export interface GrokClientConfig {
+  apiKey: string
+  baseURL?: string
+}
+
+/**
+ * Creates a Grok SDK client instance using OpenAI SDK with xAI's base URL
+ */
+export function createGrokClient(config: GrokClientConfig): OpenAI_SDK {
+  return new OpenAI_SDK({
+    apiKey: config.apiKey,
+    baseURL: config.baseURL || 'https://api.x.ai/v1',
+  })
+}
+
+/**
+ * Gets Grok API key from environment variables
+ * @throws Error if XAI_API_KEY is not found
+ */
+export function getGrokApiKeyFromEnv(): string {
+  const env =
+    typeof globalThis !== 'undefined' && (globalThis as any).window?.env
+      ? (globalThis as any).window.env
+      : typeof process !== 'undefined'
+        ? process.env
+        : undefined
+  const key = env?.XAI_API_KEY
+
+  if (!key) {
+    throw new Error(
+      'XAI_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.',
+    )
+  }
+
+  return key
+}
+
+/**
+ * Generates a unique ID with a prefix
+ */
+export function generateId(prefix: string): string {
+  return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}`
+}
diff --git a/packages/typescript/ai-grok/src/utils/index.ts b/packages/typescript/ai-grok/src/utils/index.ts
new file mode 100644
index 00000000..72c2f529
--- /dev/null
+++ b/packages/typescript/ai-grok/src/utils/index.ts
@@ -0,0 +1,10 @@
+export {
+  createGrokClient,
+  getGrokApiKeyFromEnv,
+  generateId,
+  type GrokClientConfig,
+} from './client'
+export {
+  makeGrokStructuredOutputCompatible,
+  transformNullsToUndefined,
+} from './schema-converter'
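These client helpers are what the adapters use internally. A small sketch of that flow, assuming it runs inside this package (the relative import path and the model name are illustrative):

```typescript
import { createGrokClient, getGrokApiKeyFromEnv, generateId } from "./utils";

// Resolve the key from XAI_API_KEY (throws with a descriptive error if unset)
// and point the OpenAI SDK at xAI's endpoint, https://api.x.ai/v1 by default.
const client = createGrokClient({ apiKey: getGrokApiKeyFromEnv() });

// The result is a regular OpenAI SDK instance, so the usual
// chat.completions surface is available against xAI.
const completion = await client.chat.completions.create({
  model: "grok-3",
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(generateId("chat"), completion.choices[0]?.message?.content);
```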
diff --git a/packages/typescript/ai-grok/src/utils/schema-converter.ts b/packages/typescript/ai-grok/src/utils/schema-converter.ts
new file mode 100644
index 00000000..38c345e2
--- /dev/null
+++ b/packages/typescript/ai-grok/src/utils/schema-converter.ts
@@ -0,0 +1,110 @@
+/**
+ * Recursively transform null values to undefined in an object.
+ *
+ * This is needed because Grok's structured output (via OpenAI-compatible API) requires all fields to be
+ * in the `required` array, with optional fields made nullable (type: ["string", "null"]).
+ * When Grok returns null for optional fields, we need to convert them back to
+ * undefined to match the original Zod schema expectations.
+ *
+ * @param obj - Object to transform
+ * @returns Object with nulls converted to undefined
+ */
+export function transformNullsToUndefined<T>(obj: T): T {
+  if (obj === null) {
+    return undefined as unknown as T
+  }
+
+  if (Array.isArray(obj)) {
+    return obj.map((item) => transformNullsToUndefined(item)) as unknown as T
+  }
+
+  if (typeof obj === 'object') {
+    const result: Record<string, any> = {}
+    for (const [key, value] of Object.entries(obj as Record<string, any>)) {
+      const transformed = transformNullsToUndefined(value)
+      // Only include the key if the value is not undefined
+      // This makes { notes: null } become {} (field absent) instead of { notes: undefined }
+      if (transformed !== undefined) {
+        result[key] = transformed
+      }
+    }
+    return result as T
+  }
+
+  return obj
+}
+
+/**
+ * Transform a JSON schema to be compatible with Grok's structured output requirements (OpenAI-compatible).
+ * Grok requires:
+ * - All properties must be in the `required` array
+ * - Optional fields should have null added to their type union
+ * - additionalProperties must be false for objects
+ *
+ * @param schema - JSON schema to transform
+ * @param originalRequired - Original required array (to know which fields were optional)
+ * @returns Transformed schema compatible with Grok structured output
+ */
+export function makeGrokStructuredOutputCompatible(
+  schema: Record<string, any>,
+  originalRequired: Array<string> = [],
+): Record<string, any> {
+  const result = { ...schema }
+
+  // Handle object types
+  if (result.type === 'object' && result.properties) {
+    const properties = { ...result.properties }
+    const allPropertyNames = Object.keys(properties)
+
+    // Transform each property
+    for (const propName of allPropertyNames) {
+      const prop = properties[propName]
+      const wasOptional = !originalRequired.includes(propName)
+
+      // Recursively transform nested objects/arrays
+      if (prop.type === 'object' && prop.properties) {
+        properties[propName] = makeGrokStructuredOutputCompatible(
+          prop,
+          prop.required || [],
+        )
+      } else if (prop.type === 'array' && prop.items) {
+        properties[propName] = {
+          ...prop,
+          items: makeGrokStructuredOutputCompatible(
+            prop.items,
+            prop.items.required || [],
+          ),
+        }
+      } else if (wasOptional) {
+        // Make optional fields nullable by adding null to the type
+        if (prop.type && !Array.isArray(prop.type)) {
+          properties[propName] = {
+            ...prop,
+            type: [prop.type, 'null'],
+          }
+        } else if (Array.isArray(prop.type) && !prop.type.includes('null')) {
+          properties[propName] = {
+            ...prop,
+            type: [...prop.type, 'null'],
+          }
+        }
+      }
+    }
+
+    result.properties = properties
+    // ALL properties must be required for Grok structured output
+    result.required = allPropertyNames
+    // additionalProperties must be false
+    result.additionalProperties = false
+  }
+
+  // Handle array types with object items
+  if (result.type === 'array' && result.items) {
+    result.items = makeGrokStructuredOutputCompatible(
+      result.items,
+      result.items.required || [],
+    )
+  }
+
+  return result
+}
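To make the two transformations above concrete, here is a small worked example. The schema and values are made up for illustration, and the relative import path assumes code living alongside this package's `src/`:

```typescript
import {
  makeGrokStructuredOutputCompatible,
  transformNullsToUndefined,
} from "./utils/schema-converter";

// A schema where `notes` is optional (not listed in `required`).
const schema = {
  type: "object",
  properties: {
    location: { type: "string" },
    notes: { type: "string" },
  },
  required: ["location"],
};

const strict = makeGrokStructuredOutputCompatible(schema, schema.required);
// strict.required              -> ["location", "notes"]
// strict.properties.notes.type -> ["string", "null"]
// strict.additionalProperties  -> false

// When Grok returns null for the optional field, fold it back to "absent".
const args = transformNullsToUndefined({ location: "Austin", notes: null });
// args -> { location: "Austin" }
```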
diff --git a/packages/typescript/ai-grok/tests/grok-adapter.test.ts b/packages/typescript/ai-grok/tests/grok-adapter.test.ts
new file mode 100644
index 00000000..09373f50
--- /dev/null
+++ b/packages/typescript/ai-grok/tests/grok-adapter.test.ts
@@ -0,0 +1,99 @@
+import { describe, it, expect, vi, afterEach } from 'vitest'
+import { createGrokText, grokText } from '../src/adapters/text'
+import { createGrokImage, grokImage } from '../src/adapters/image'
+import { createGrokSummarize, grokSummarize } from '../src/adapters/summarize'
+
+describe('Grok adapters', () => {
+  afterEach(() => {
+    vi.unstubAllEnvs()
+  })
+
+  describe('Text adapter', () => {
+    it('creates a text adapter with explicit API key', () => {
+      const adapter = createGrokText('grok-3', 'test-api-key')
+
+      expect(adapter).toBeDefined()
+      expect(adapter.kind).toBe('text')
+      expect(adapter.name).toBe('grok')
+      expect(adapter.model).toBe('grok-3')
+    })
+
+    it('creates a text adapter from environment variable', () => {
+      vi.stubEnv('XAI_API_KEY', 'env-api-key')
+
+      const adapter = grokText('grok-4-0709')
+
+      expect(adapter).toBeDefined()
+      expect(adapter.kind).toBe('text')
+      expect(adapter.model).toBe('grok-4-0709')
+    })
+
+    it('throws if XAI_API_KEY is not set when using grokText', () => {
+      vi.stubEnv('XAI_API_KEY', '')
+
+      expect(() => grokText('grok-3')).toThrow('XAI_API_KEY is required')
+    })
+
+    it('allows custom baseURL override', () => {
+      const adapter = createGrokText('grok-3', 'test-api-key', {
+        baseURL: 'https://custom.api.example.com/v1',
+      })
+
+      expect(adapter).toBeDefined()
+    })
+  })
+
+  describe('Image adapter', () => {
+    it('creates an image adapter with explicit API key', () => {
+      const adapter = createGrokImage('grok-2-image-1212', 'test-api-key')
+
+      expect(adapter).toBeDefined()
+      expect(adapter.kind).toBe('image')
+      expect(adapter.name).toBe('grok')
+      expect(adapter.model).toBe('grok-2-image-1212')
+    })
+
+    it('creates an image adapter from environment variable', () => {
+      vi.stubEnv('XAI_API_KEY', 'env-api-key')
+
+      const adapter = grokImage('grok-2-image-1212')
+
+      expect(adapter).toBeDefined()
+      expect(adapter.kind).toBe('image')
+    })
+
+    it('throws if XAI_API_KEY is not set when using grokImage', () => {
+      vi.stubEnv('XAI_API_KEY', '')
+
+      expect(() => grokImage('grok-2-image-1212')).toThrow(
+        'XAI_API_KEY is required',
+      )
+    })
+  })
+
+  describe('Summarize adapter', () => {
+    it('creates a summarize adapter with explicit API key', () => {
+      const adapter = createGrokSummarize('grok-3', 'test-api-key')
+
+      expect(adapter).toBeDefined()
+      expect(adapter.kind).toBe('summarize')
+      expect(adapter.name).toBe('grok')
+      expect(adapter.model).toBe('grok-3')
+    })
+
+    it('creates a summarize adapter from environment variable', () => {
+      vi.stubEnv('XAI_API_KEY', 'env-api-key')
+
+      const adapter = grokSummarize('grok-4-0709')
+
+      expect(adapter).toBeDefined()
+      expect(adapter.kind).toBe('summarize')
+    })
+
+    it('throws if XAI_API_KEY is not set when using grokSummarize', () => {
+      vi.stubEnv('XAI_API_KEY', '')
+
+      expect(() => grokSummarize('grok-3')).toThrow('XAI_API_KEY is required')
+    })
+  })
+})
diff --git a/packages/typescript/ai-grok/tsconfig.json b/packages/typescript/ai-grok/tsconfig.json
new file mode 100644
index 00000000..ea11c109
--- /dev/null
+++ b/packages/typescript/ai-grok/tsconfig.json
@@ -0,0 +1,9 @@
+{
+  "extends": "../../../tsconfig.json",
+  "compilerOptions": {
+    "outDir": "dist",
+    "rootDir": "src"
+  },
+  "include": ["src/**/*.ts", "src/**/*.tsx"],
+  "exclude": ["node_modules", "dist", "**/*.config.ts"]
+}
diff --git a/packages/typescript/ai-grok/vite.config.ts b/packages/typescript/ai-grok/vite.config.ts
new file mode 100644
index 00000000..77bcc2e6
--- /dev/null
+++ b/packages/typescript/ai-grok/vite.config.ts
@@ -0,0 +1,36 @@
+import { defineConfig, mergeConfig } from 'vitest/config'
+import { tanstackViteConfig } from '@tanstack/vite-config'
+import packageJson from './package.json'
+
+const config = defineConfig({
+  test: {
+    name: packageJson.name,
+    dir: './',
+    watch: false,
+    globals: true,
+    environment: 'node',
+    include: ['tests/**/*.test.ts'],
+    coverage: {
+      provider: 'v8',
+      reporter: ['text', 'json', 'html', 'lcov'],
+      exclude: [
+        'node_modules/',
+        'dist/',
+        'tests/',
+        '**/*.test.ts',
+        '**/*.config.ts',
+        '**/types.ts',
+      ],
+      include: ['src/**/*.ts'],
+    },
+  },
+})
+
+export default mergeConfig(
+  config,
+  tanstackViteConfig({
+    entry: ['./src/index.ts'],
+    srcDir: './src',
+    cjs: false,
+  }),
+)
diff --git a/packages/typescript/smoke-tests/adapters/package.json b/packages/typescript/smoke-tests/adapters/package.json
index 17f686eb..67654826 100644
--- a/packages/typescript/smoke-tests/adapters/package.json
+++ b/packages/typescript/smoke-tests/adapters/package.json
@@ -14,6 +14,7 @@
     "@tanstack/ai": "workspace:*",
     "@tanstack/ai-anthropic": "workspace:*",
     "@tanstack/ai-gemini": "workspace:*",
+    "@tanstack/ai-grok": "workspace:*",
     "@tanstack/ai-ollama": "workspace:*",
"@tanstack/ai-openai": "workspace:*", "commander": "^13.1.0" diff --git a/packages/typescript/smoke-tests/adapters/src/adapters/index.ts b/packages/typescript/smoke-tests/adapters/src/adapters/index.ts index 91efe0a2..4eb9b883 100644 --- a/packages/typescript/smoke-tests/adapters/src/adapters/index.ts +++ b/packages/typescript/smoke-tests/adapters/src/adapters/index.ts @@ -5,6 +5,7 @@ import { geminiSummarize, geminiText, } from '@tanstack/ai-gemini' +import { grokImage, grokSummarize, grokText } from '@tanstack/ai-grok' import { ollamaSummarize, ollamaText } from '@tanstack/ai-ollama' import { openaiImage, @@ -77,6 +78,10 @@ const GEMINI_TTS_MODEL = const OLLAMA_MODEL = process.env.OLLAMA_MODEL || 'mistral:7b' const OLLAMA_SUMMARY_MODEL = process.env.OLLAMA_SUMMARY_MODEL || OLLAMA_MODEL +const GROK_MODEL = process.env.GROK_MODEL || 'grok-3' +const GROK_SUMMARY_MODEL = process.env.GROK_SUMMARY_MODEL || GROK_MODEL +const GROK_IMAGE_MODEL = process.env.GROK_IMAGE_MODEL || 'grok-2-image-1212' + /** * Create Anthropic adapters */ @@ -160,6 +165,26 @@ function createOllamaAdapters(): AdapterSet | null { } } +/** + * Create Grok adapters + */ +function createGrokAdapters(): AdapterSet | null { + const apiKey = process.env.XAI_API_KEY + if (!apiKey) return null + + return { + textAdapter: grokText(GROK_MODEL as any, { apiKey } as any), + summarizeAdapter: grokSummarize( + GROK_SUMMARY_MODEL as any, + { apiKey } as any, + ), + imageAdapter: grokImage(GROK_IMAGE_MODEL as any, { apiKey } as any), + chatModel: GROK_MODEL, + summarizeModel: GROK_SUMMARY_MODEL, + imageModel: GROK_IMAGE_MODEL, + } +} + /** * Registry of all available adapters */ @@ -189,6 +214,12 @@ export const ADAPTERS: Array = [ envKey: null, create: createOllamaAdapters, }, + { + id: 'grok', + name: 'Grok', + envKey: 'XAI_API_KEY', + create: createGrokAdapters, + }, ] /** diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c3ed56e5..ff102908 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -201,6 +201,9 @@ importers: '@tanstack/ai-gemini': specifier: workspace:* version: link:../../packages/typescript/ai-gemini + '@tanstack/ai-grok': + specifier: workspace:* + version: link:../../packages/typescript/ai-grok '@tanstack/ai-ollama': specifier: workspace:* version: link:../../packages/typescript/ai-ollama @@ -695,6 +698,25 @@ importers: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-grok: + dependencies: + '@tanstack/ai': + specifier: workspace:^ + version: link:../ai + openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.18.3)(zod@4.2.1) + zod: + specifier: ^4.0.0 + version: 4.2.1 + devDependencies: + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.15(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-ollama: dependencies: '@tanstack/ai': @@ -1012,6 +1034,9 @@ importers: '@tanstack/ai-gemini': specifier: workspace:* version: link:../../ai-gemini + '@tanstack/ai-grok': + specifier: workspace:* + version: link:../../ai-grok '@tanstack/ai-ollama': specifier: workspace:* version: link:../../ai-ollama @@ -1144,6 +1169,9 @@ importers: '@tanstack/ai-gemini': specifier: workspace:* version: link:../../packages/typescript/ai-gemini + '@tanstack/ai-grok': + specifier: 
+        version: link:../../packages/typescript/ai-grok
       '@tanstack/ai-ollama':
         specifier: workspace:*
         version: link:../../packages/typescript/ai-ollama
@@ -13736,6 +13764,11 @@ snapshots:
       ws: 8.18.3
       zod: 4.1.13
 
+  openai@6.10.0(ws@8.18.3)(zod@4.2.1):
+    optionalDependencies:
+      ws: 8.18.3
+      zod: 4.2.1
+
   optionator@0.9.4:
     dependencies:
       deep-is: 0.1.4
diff --git a/testing/panel/package.json b/testing/panel/package.json
index 2ae153c3..f5d6863c 100644
--- a/testing/panel/package.json
+++ b/testing/panel/package.json
@@ -13,6 +13,7 @@
     "@tanstack/ai-anthropic": "workspace:*",
     "@tanstack/ai-client": "workspace:*",
     "@tanstack/ai-gemini": "workspace:*",
+    "@tanstack/ai-grok": "workspace:*",
     "@tanstack/ai-ollama": "workspace:*",
     "@tanstack/ai-openai": "workspace:*",
     "@tanstack/ai-react": "workspace:*",
diff --git a/testing/panel/src/lib/model-selection.ts b/testing/panel/src/lib/model-selection.ts
index 0412d275..4d40ccc7 100644
--- a/testing/panel/src/lib/model-selection.ts
+++ b/testing/panel/src/lib/model-selection.ts
@@ -1,4 +1,4 @@
-export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama'
+export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'grok'
 
 export interface ModelOption {
   provider: Provider
@@ -67,6 +67,23 @@ export const MODEL_OPTIONS: Array<ModelOption> = [
     model: 'smollm',
     label: 'Ollama - SmolLM',
   },
+
+  // Grok
+  {
+    provider: 'grok',
+    model: 'grok-3',
+    label: 'Grok - Grok 3',
+  },
+  {
+    provider: 'grok',
+    model: 'grok-3-mini',
+    label: 'Grok - Grok 3 Mini',
+  },
+  {
+    provider: 'grok',
+    model: 'grok-2-vision-1212',
+    label: 'Grok - Grok 2 Vision',
+  },
 ]
 
 const STORAGE_KEY = 'tanstack-ai-model-preference'
diff --git a/testing/panel/src/routes/api.chat.ts b/testing/panel/src/routes/api.chat.ts
index 948d048b..4a0d29a0 100644
--- a/testing/panel/src/routes/api.chat.ts
+++ b/testing/panel/src/routes/api.chat.ts
@@ -9,6 +9,7 @@ import {
 } from '@tanstack/ai'
 import { anthropicText } from '@tanstack/ai-anthropic'
 import { geminiText } from '@tanstack/ai-gemini'
+import { grokText } from '@tanstack/ai-grok'
 import { openaiText } from '@tanstack/ai-openai'
 import { ollamaText } from '@tanstack/ai-ollama'
 import type { AIAdapter, StreamChunk } from '@tanstack/ai'
@@ -51,7 +52,7 @@ const addToCartToolServer = addToCartToolDef.server((args) => ({
   totalItems: args.quantity,
 }))
 
-type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama'
+type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'grok'
 
 /**
  * Wraps an adapter to intercept chatStream and record raw chunks from the adapter
@@ -172,6 +173,10 @@ export const Route = createFileRoute('/api/chat')({
       createChatOptions({
         adapter: geminiText((model || 'gemini-2.0-flash') as any),
       }),
+    grok: () =>
+      createChatOptions({
+        adapter: grokText((model || 'grok-3') as any),
+      }),
     ollama: () =>
       createChatOptions({
        adapter: ollamaText((model || 'mistral:7b') as any),