From 85e513808e52684fae75cb86c20c24e084fbc259 Mon Sep 17 00:00:00 2001 From: Jack Herrington Date: Tue, 23 Dec 2025 06:46:13 -0800 Subject: [PATCH 1/9] feat: add Grok support --- examples/ts-react-chat/package.json | 1 + .../ts-react-chat/src/lib/model-selection.ts | 19 +- .../ts-react-chat/src/routes/api.tanchat.ts | 7 +- packages/typescript/ai-grok/CHANGELOG.md | 7 + packages/typescript/ai-grok/README.md | 130 +++++ packages/typescript/ai-grok/package.json | 53 ++ .../typescript/ai-grok/src/adapters/image.ts | 176 ++++++ .../ai-grok/src/adapters/summarize.ts | 170 ++++++ .../typescript/ai-grok/src/adapters/text.ts | 502 ++++++++++++++++++ .../src/image/image-provider-options.ts | 118 ++++ packages/typescript/ai-grok/src/index.ts | 55 ++ .../typescript/ai-grok/src/message-types.ts | 67 +++ packages/typescript/ai-grok/src/model-meta.ts | 84 +++ .../ai-grok/src/text/text-provider-options.ts | 77 +++ .../ai-grok/src/tools/function-tool.ts | 45 ++ .../typescript/ai-grok/src/tools/index.ts | 5 + .../ai-grok/src/tools/tool-converter.ts | 17 + .../typescript/ai-grok/src/utils/client.ts | 45 ++ .../typescript/ai-grok/src/utils/index.ts | 10 + .../ai-grok/src/utils/schema-converter.ts | 110 ++++ .../ai-grok/tests/grok-adapter.test.ts | 221 ++++++++ packages/typescript/ai-grok/tsconfig.json | 9 + packages/typescript/ai-grok/vite.config.ts | 36 ++ .../smoke-tests/adapters/package.json | 1 + .../adapters/src/adapters/index.ts | 28 + pnpm-lock.yaml | 33 ++ testing/panel/package.json | 1 + testing/panel/src/lib/model-selection.ts | 19 +- testing/panel/src/routes/api.chat.ts | 7 +- 29 files changed, 2049 insertions(+), 4 deletions(-) create mode 100644 packages/typescript/ai-grok/CHANGELOG.md create mode 100644 packages/typescript/ai-grok/README.md create mode 100644 packages/typescript/ai-grok/package.json create mode 100644 packages/typescript/ai-grok/src/adapters/image.ts create mode 100644 packages/typescript/ai-grok/src/adapters/summarize.ts create mode 100644 
packages/typescript/ai-grok/src/adapters/text.ts create mode 100644 packages/typescript/ai-grok/src/image/image-provider-options.ts create mode 100644 packages/typescript/ai-grok/src/index.ts create mode 100644 packages/typescript/ai-grok/src/message-types.ts create mode 100644 packages/typescript/ai-grok/src/model-meta.ts create mode 100644 packages/typescript/ai-grok/src/text/text-provider-options.ts create mode 100644 packages/typescript/ai-grok/src/tools/function-tool.ts create mode 100644 packages/typescript/ai-grok/src/tools/index.ts create mode 100644 packages/typescript/ai-grok/src/tools/tool-converter.ts create mode 100644 packages/typescript/ai-grok/src/utils/client.ts create mode 100644 packages/typescript/ai-grok/src/utils/index.ts create mode 100644 packages/typescript/ai-grok/src/utils/schema-converter.ts create mode 100644 packages/typescript/ai-grok/tests/grok-adapter.test.ts create mode 100644 packages/typescript/ai-grok/tsconfig.json create mode 100644 packages/typescript/ai-grok/vite.config.ts diff --git a/examples/ts-react-chat/package.json b/examples/ts-react-chat/package.json index eb33f5eb..f2ff15ef 100644 --- a/examples/ts-react-chat/package.json +++ b/examples/ts-react-chat/package.json @@ -14,6 +14,7 @@ "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-client": "workspace:*", "@tanstack/ai-gemini": "workspace:*", + "@tanstack/ai-grok": "workspace:*", "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-react": "workspace:*", diff --git a/examples/ts-react-chat/src/lib/model-selection.ts b/examples/ts-react-chat/src/lib/model-selection.ts index 0412d275..4d40ccc7 100644 --- a/examples/ts-react-chat/src/lib/model-selection.ts +++ b/examples/ts-react-chat/src/lib/model-selection.ts @@ -1,4 +1,4 @@ -export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'grok' export interface ModelOption { provider: Provider @@ -67,6 
+67,23 @@ export const MODEL_OPTIONS: Array = [ model: 'smollm', label: 'Ollama - SmolLM', }, + + // Grok + { + provider: 'grok', + model: 'grok-3', + label: 'Grok - Grok 3', + }, + { + provider: 'grok', + model: 'grok-3-mini', + label: 'Grok - Grok 3 Mini', + }, + { + provider: 'grok', + model: 'grok-2-vision-1212', + label: 'Grok - Grok 2 Vision', + }, ] const STORAGE_KEY = 'tanstack-ai-model-preference' diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index 550e373c..e5612590 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -9,6 +9,7 @@ import { openaiText } from '@tanstack/ai-openai' import { ollamaText } from '@tanstack/ai-ollama' import { anthropicText } from '@tanstack/ai-anthropic' import { geminiText } from '@tanstack/ai-gemini' +import { grokText } from '@tanstack/ai-grok' import type { AnyTextAdapter } from '@tanstack/ai' import { addToCartToolDef, @@ -18,7 +19,7 @@ import { recommendGuitarToolDef, } from '@/lib/guitar-tools' -type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'grok' const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. 
@@ -90,6 +91,10 @@ export const Route = createFileRoute('/api/tanchat')({ (model || 'gemini-2.5-flash') as 'gemini-2.5-flash', ), }), + grok: () => + createChatOptions({ + adapter: grokText((model || 'grok-3') as 'grok-3'), + }), ollama: () => createChatOptions({ adapter: ollamaText((model || 'mistral:7b') as 'mistral:7b'), diff --git a/packages/typescript/ai-grok/CHANGELOG.md b/packages/typescript/ai-grok/CHANGELOG.md new file mode 100644 index 00000000..a6c60107 --- /dev/null +++ b/packages/typescript/ai-grok/CHANGELOG.md @@ -0,0 +1,7 @@ +# @tanstack/ai-grok + +## 0.0.3 + +### Patch Changes + +- Initial release of Grok (xAI) adapter for TanStack AI diff --git a/packages/typescript/ai-grok/README.md b/packages/typescript/ai-grok/README.md new file mode 100644 index 00000000..b4f87f9f --- /dev/null +++ b/packages/typescript/ai-grok/README.md @@ -0,0 +1,130 @@ +# @tanstack/ai-grok + +Grok (xAI) adapter for TanStack AI + +## Installation + +```bash +npm install @tanstack/ai-grok +# or +pnpm add @tanstack/ai-grok +# or +yarn add @tanstack/ai-grok +``` + +## Setup + +Get your API key from [xAI Console](https://console.x.ai) and set it as an environment variable: + +```bash +export XAI_API_KEY="xai-..." 
+``` + +## Usage + +### Text/Chat Adapter + +```typescript +import { grokText } from '@tanstack/ai-grok' +import { generate } from '@tanstack/ai' + +const adapter = grokText() + +const result = await generate({ + adapter, + model: 'grok-3', + messages: [ + { role: 'user', content: 'Explain quantum computing in simple terms' }, + ], +}) + +console.log(result.text) +``` + +### Summarization Adapter + +```typescript +import { grokSummarize } from '@tanstack/ai-grok' +import { summarize } from '@tanstack/ai' + +const adapter = grokSummarize() + +const result = await summarize({ + adapter, + model: 'grok-3', + text: 'Long article text...', + style: 'bullet-points', +}) + +console.log(result.summary) +``` + +### Image Generation Adapter + +```typescript +import { grokImage } from '@tanstack/ai-grok' +import { generateImages } from '@tanstack/ai' + +const adapter = grokImage() + +const result = await generateImages({ + adapter, + model: 'grok-2-image-1212', + prompt: 'A beautiful sunset over mountains', + numberOfImages: 1, + size: '1024x1024', +}) + +console.log(result.images[0].url) +``` + +### With Explicit API Key + +```typescript +import { createGrokText } from '@tanstack/ai-grok' + +const adapter = createGrokText('grok-3', 'xai-your-api-key-here') +``` + +## Supported Models + +### Chat Models + +- `grok-4` - Latest flagship model +- `grok-3` - Previous generation model +- `grok-3-mini` - Smaller, faster model +- `grok-4-fast` - Fast inference model +- `grok-4.1-fast` - Production-focused fast model +- `grok-2-vision-1212` - Vision-capable model (text + image input) + +### Image Models + +- `grok-2-image-1212` - Image generation model + +## Features + +- ✅ Streaming chat completions +- ✅ Structured output (JSON Schema) +- ✅ Function/tool calling +- ✅ Multimodal input (text + images for vision models) +- ✅ Image generation +- ✅ Text summarization +- ❌ Embeddings (not supported by xAI) + +## Tree-Shakeable Adapters + +This package uses tree-shakeable adapters, so you only 
import what you need: + +```typescript +// Only imports text adapter +import { grokText } from '@tanstack/ai-grok' + +// Only imports image adapter +import { grokImage } from '@tanstack/ai-grok' +``` + +This keeps your bundle size small! + +## License + +MIT diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json new file mode 100644 index 00000000..d59054cb --- /dev/null +++ b/packages/typescript/ai-grok/package.json @@ -0,0 +1,53 @@ +{ + "name": "@tanstack/ai-grok", + "version": "0.0.3", + "description": "Grok (xAI) adapter for TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-grok" + }, + "type": "module", + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "keywords": [ + "ai", + "grok", + "xai", + "tanstack", + "adapter" + ], + "dependencies": { + "openai": "^6.9.1" + }, + "devDependencies": { + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^", + "zod": "^4.0.0" + } +} diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts new file mode 100644 index 00000000..beb5dd18 --- /dev/null +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -0,0 +1,176 @@ +import { BaseImageAdapter } from '@tanstack/ai/adapters' +import { createGrokClient, generateId, getGrokApiKeyFromEnv } from '../utils' +import { + validateImageSize, + validateNumberOfImages, 
+ validatePrompt, +} from '../image/image-provider-options' +import type { GROK_IMAGE_MODELS } from '../model-meta' +import type { + GrokImageModelProviderOptionsByName, + GrokImageModelSizeByName, + GrokImageProviderOptions, +} from '../image/image-provider-options' +import type { + GeneratedImage, + ImageGenerationOptions, + ImageGenerationResult, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' +import type { GrokClientConfig } from '../utils' + +/** + * Configuration for Grok image adapter + */ +export interface GrokImageConfig extends GrokClientConfig {} + +/** Model type for Grok Image */ +export type GrokImageModel = (typeof GROK_IMAGE_MODELS)[number] + +/** + * Grok Image Generation Adapter + * + * Tree-shakeable adapter for Grok image generation functionality. + * Supports grok-2-image-1212 model. + * + * Features: + * - Model-specific type-safe provider options + * - Size validation per model + * - Number of images validation + */ +export class GrokImageAdapter< + TModel extends GrokImageModel, +> extends BaseImageAdapter< + TModel, + GrokImageProviderOptions, + GrokImageModelProviderOptionsByName, + GrokImageModelSizeByName +> { + readonly kind = 'image' as const + readonly name = 'grok' as const + + private client: OpenAI_SDK + + constructor(config: GrokImageConfig, model: TModel) { + super({}, model) + this.client = createGrokClient(config) + } + + async generateImages( + options: ImageGenerationOptions, + ): Promise { + const { model, prompt, numberOfImages, size } = options + + // Validate inputs + validatePrompt({ prompt, model }) + validateImageSize(model, size) + validateNumberOfImages(model, numberOfImages) + + // Build request based on model type + const request = this.buildRequest(options) + + const response = await this.client.images.generate({ + ...request, + stream: false, + }) + + return this.transformResponse(model, response) + } + + private buildRequest( + options: ImageGenerationOptions, + ): 
OpenAI_SDK.Images.ImageGenerateParams { + const { model, prompt, numberOfImages, size, modelOptions } = options + + return { + model, + prompt, + n: numberOfImages ?? 1, + size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], + ...modelOptions, + } + } + + private transformResponse( + model: string, + response: OpenAI_SDK.Images.ImagesResponse, + ): ImageGenerationResult { + const images: Array = (response.data ?? []).map((item) => ({ + b64Json: item.b64_json, + url: item.url, + revisedPrompt: item.revised_prompt, + })) + + return { + id: generateId(this.name), + model, + images, + usage: response.usage + ? { + inputTokens: response.usage.input_tokens, + outputTokens: response.usage.output_tokens, + totalTokens: response.usage.total_tokens, + } + : undefined, + } + } +} + +/** + * Creates a Grok image adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'grok-2-image-1212') + * @param apiKey - Your xAI API key + * @param config - Optional additional configuration + * @returns Configured Grok image adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createGrokImage('grok-2-image-1212', "xai-..."); + * + * const result = await generateImage({ + * adapter, + * prompt: 'A cute baby sea otter' + * }); + * ``` + */ +export function createGrokImage( + model: TModel, + apiKey: string, + config?: Omit, +): GrokImageAdapter { + return new GrokImageAdapter({ apiKey, ...config }, model) +} + +/** + * Creates a Grok image adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. 
+ * + * Looks for `XAI_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'grok-2-image-1212') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured Grok image adapter instance with resolved types + * @throws Error if XAI_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses XAI_API_KEY from environment + * const adapter = grokImage('grok-2-image-1212'); + * + * const result = await generateImage({ + * adapter, + * prompt: 'A beautiful sunset over mountains' + * }); + * ``` + */ +export function grokImage( + model: TModel, + config?: Omit, +): GrokImageAdapter { + const apiKey = getGrokApiKeyFromEnv() + return createGrokImage(model, apiKey, config) +} diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts new file mode 100644 index 00000000..1115732d --- /dev/null +++ b/packages/typescript/ai-grok/src/adapters/summarize.ts @@ -0,0 +1,170 @@ +import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' +import { getGrokApiKeyFromEnv } from '../utils' +import { GrokTextAdapter } from './text' +import type { GROK_CHAT_MODELS } from '../model-meta' +import type { StreamChunk, SummarizationOptions, SummarizationResult } from '@tanstack/ai' +import type { GrokClientConfig } from '../utils' + +/** + * Configuration for Grok summarize adapter + */ +export interface GrokSummarizeConfig extends GrokClientConfig {} + +/** + * Grok-specific provider options for summarization + */ +export interface GrokSummarizeProviderOptions { + /** Temperature for response generation (0-2) */ + temperature?: number + /** Maximum tokens in the response */ + maxTokens?: number +} + +/** Model type for Grok summarization */ +export type GrokSummarizeModel = (typeof GROK_CHAT_MODELS)[number] + +/** + * Grok Summarize Adapter + * + * A thin wrapper 
around the text adapter that adds summarization-specific prompting. + * Delegates all API calls to the GrokTextAdapter. + */ +export class GrokSummarizeAdapter< + TModel extends GrokSummarizeModel, +> extends BaseSummarizeAdapter { + readonly kind = 'summarize' as const + readonly name = 'grok' as const + + private textAdapter: GrokTextAdapter + + constructor(config: GrokSummarizeConfig, model: TModel) { + super({}, model) + this.textAdapter = new GrokTextAdapter(config, model) + } + + async summarize(options: SummarizationOptions): Promise { + const systemPrompt = this.buildSummarizationPrompt(options) + + // Use the text adapter's streaming and collect the result + let summary = '' + let id = '' + let model = options.model + let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } + + for await (const chunk of this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + })) { + if (chunk.type === 'content') { + summary = chunk.content + id = chunk.id + model = chunk.model + } + if (chunk.type === 'done' && chunk.usage) { + usage = chunk.usage + } + } + + return { id, model, summary, usage } + } + + async *summarizeStream( + options: SummarizationOptions, + ): AsyncIterable { + const systemPrompt = this.buildSummarizationPrompt(options) + + // Delegate directly to the text adapter's streaming + yield* this.textAdapter.chatStream({ + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + }) + } + + private buildSummarizationPrompt(options: SummarizationOptions): string { + let prompt = 'You are a professional summarizer. ' + + switch (options.style) { + case 'bullet-points': + prompt += 'Provide a summary in bullet point format. ' + break + case 'paragraph': + prompt += 'Provide a summary in paragraph format. 
' + break + case 'concise': + prompt += 'Provide a very concise summary in 1-2 sentences. ' + break + default: + prompt += 'Provide a clear and concise summary. ' + } + + if (options.focus && options.focus.length > 0) { + prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` + } + + if (options.maxLength) { + prompt += `Keep the summary under ${options.maxLength} tokens. ` + } + + return prompt + } +} + +/** + * Creates a Grok summarize adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'grok-3', 'grok-4') + * @param apiKey - Your xAI API key + * @param config - Optional additional configuration + * @returns Configured Grok summarize adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createGrokSummarize('grok-3', "xai-..."); + * ``` + */ +export function createGrokSummarize( + model: TModel, + apiKey: string, + config?: Omit, +): GrokSummarizeAdapter { + return new GrokSummarizeAdapter({ apiKey, ...config }, model) +} + +/** + * Creates a Grok summarize adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. + * + * Looks for `XAI_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'grok-3', 'grok-4') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured Grok summarize adapter instance with resolved types + * @throws Error if XAI_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses XAI_API_KEY from environment + * const adapter = grokSummarize('grok-3'); + * + * await summarize({ + * adapter, + * text: "Long article text..." 
+ * }); + * ``` + */ +export function grokSummarize( + model: TModel, + config?: Omit, +): GrokSummarizeAdapter { + const apiKey = getGrokApiKeyFromEnv() + return createGrokSummarize(model, apiKey, config) +} diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts new file mode 100644 index 00000000..6999d955 --- /dev/null +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -0,0 +1,502 @@ +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { validateTextProviderOptions } from '../text/text-provider-options' +import { convertToolsToProviderFormat } from '../tools' +import { + createGrokClient, + generateId, + getGrokApiKeyFromEnv, + makeGrokStructuredOutputCompatible, + transformNullsToUndefined, +} from '../utils' +import type { GROK_CHAT_MODELS, ResolveInputModalities, ResolveProviderOptions } from '../model-meta' +import type { + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type OpenAI_SDK from 'openai' +import type { + ContentPart, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' +import type { InternalTextProviderOptions } from '../text/text-provider-options' +import type { + GrokImageMetadata, + GrokMessageMetadataByModality, +} from '../message-types' +import type { GrokClientConfig } from '../utils' + +/** + * Configuration for Grok text adapter + */ +export interface GrokTextConfig extends GrokClientConfig {} + +/** + * Alias for TextProviderOptions for external use + */ +export type { ExternalTextProviderOptions as GrokTextProviderOptions } from '../text/text-provider-options' + +/** + * Grok Text (Chat) Adapter + * + * Tree-shakeable adapter for Grok chat/text completion functionality. + * Uses OpenAI-compatible Chat Completions API (not Responses API). 
+ */ +export class GrokTextAdapter< + TModel extends (typeof GROK_CHAT_MODELS)[number], +> extends BaseTextAdapter< + TModel, + ResolveProviderOptions, + ResolveInputModalities, + GrokMessageMetadataByModality +> { + readonly kind = 'text' as const + readonly name = 'grok' as const + + private client: OpenAI_SDK + + constructor(config: GrokTextConfig, model: TModel) { + super({}, model) + this.client = createGrokClient(config) + } + + async *chatStream( + options: TextOptions>, + ): AsyncIterable { + const requestParams = this.mapTextOptionsToGrok(options) + + try { + const stream = await this.client.chat.completions.create({ + ...requestParams, + stream: true, + }) + + yield* this.processGrokStreamChunks(stream, options) + } catch (error: unknown) { + const err = error as Error + console.error('>>> chatStream: Fatal error during response creation <<<') + console.error('>>> Error message:', err.message) + console.error('>>> Error stack:', err.stack) + console.error('>>> Full error:', err) + throw error + } + } + + /** + * Generate structured output using Grok's JSON Schema response format. + * Uses stream: false to get the complete response in one call. + * + * Grok has strict requirements for structured output (via OpenAI-compatible API): + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for all objects + * + * The outputSchema is already JSON Schema (converted in the ai layer). + * We apply Grok-specific transformations for structured output compatibility. 
+ */ + async structuredOutput( + options: StructuredOutputOptions>, + ): Promise> { + const { chatOptions, outputSchema } = options + const requestParams = this.mapTextOptionsToGrok(chatOptions) + + // Apply Grok-specific transformations for structured output compatibility + const jsonSchema = makeGrokStructuredOutputCompatible( + outputSchema, + outputSchema.required || [], + ) + + try { + const response = await this.client.chat.completions.create({ + ...requestParams, + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, + }, + }) + + // Extract text content from the response + const rawText = response.choices[0]?.message.content || '' + + // Parse the JSON response + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + + // Transform null values to undefined to match original Zod schema expectations + // Grok returns null for optional fields we made nullable in the schema + const transformed = transformNullsToUndefined(parsed) + + return { + data: transformed, + rawText, + } + } catch (error: unknown) { + const err = error as Error + console.error('>>> structuredOutput: Error during response creation <<<') + console.error('>>> Error message:', err.message) + throw error + } + } + + private async *processGrokStreamChunks( + stream: AsyncIterable, + options: TextOptions, + ): AsyncIterable { + let accumulatedContent = '' + const timestamp = Date.now() + let responseId = generateId(this.name) + + // Track tool calls being streamed (arguments come in chunks) + const toolCallsInProgress = new Map< + number, + { + id: string + name: string + arguments: string + } + >() + + try { + for await (const chunk of stream) { + responseId = chunk.id || responseId + const choice = chunk.choices[0] + + if (!choice) continue 
+ + const delta = choice.delta + const deltaContent = delta.content + const deltaToolCalls = delta.tool_calls + + // Handle content delta + if (deltaContent) { + accumulatedContent += deltaContent + yield { + type: 'content', + id: responseId, + model: chunk.model || options.model, + timestamp, + delta: deltaContent, + content: accumulatedContent, + role: 'assistant', + } + } + + // Handle tool calls - they come in as deltas + if (deltaToolCalls) { + for (const toolCallDelta of deltaToolCalls) { + const index = toolCallDelta.index + + // Initialize or update the tool call in progress + if (!toolCallsInProgress.has(index)) { + toolCallsInProgress.set(index, { + id: toolCallDelta.id || '', + name: toolCallDelta.function?.name || '', + arguments: '', + }) + } + + const toolCall = toolCallsInProgress.get(index)! + + // Update with any new data from the delta + if (toolCallDelta.id) { + toolCall.id = toolCallDelta.id + } + if (toolCallDelta.function?.name) { + toolCall.name = toolCallDelta.function.name + } + if (toolCallDelta.function?.arguments) { + toolCall.arguments += toolCallDelta.function.arguments + } + } + } + + // Handle finish reason + if (choice.finish_reason) { + // Emit all completed tool calls + if ( + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ) { + for (const [index, toolCall] of toolCallsInProgress) { + yield { + type: 'tool_call', + id: responseId, + model: chunk.model || options.model, + timestamp, + index, + toolCall: { + id: toolCall.id, + type: 'function', + function: { + name: toolCall.name, + arguments: toolCall.arguments, + }, + }, + } + } + } + + yield { + type: 'done', + id: responseId, + model: chunk.model || options.model, + timestamp, + usage: chunk.usage + ? 
{ + promptTokens: chunk.usage.prompt_tokens || 0, + completionTokens: chunk.usage.completion_tokens || 0, + totalTokens: chunk.usage.total_tokens || 0, + } + : undefined, + finishReason: + choice.finish_reason === 'tool_calls' || + toolCallsInProgress.size > 0 + ? 'tool_calls' + : 'stop', + } + } + } + } catch (error: unknown) { + const err = error as Error & { code?: string } + console.log('[Grok Adapter] Stream ended with error:', err.message) + yield { + type: 'error', + id: responseId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error occurred', + code: err.code, + }, + } + } + } + + /** + * Maps common options to Grok-specific Chat Completions format + */ + private mapTextOptionsToGrok( + options: TextOptions, + ): OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming { + const modelOptions = options.modelOptions as + | Omit< + InternalTextProviderOptions, + 'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p' + > + | undefined + + if (modelOptions) { + validateTextProviderOptions({ + ...modelOptions, + model: options.model, + }) + } + + const tools = options.tools + ? 
convertToolsToProviderFormat(options.tools) + : undefined + + // Build messages array with system prompts + const messages: Array = + [] + + // Add system prompts first + if (options.systemPrompts && options.systemPrompts.length > 0) { + messages.push({ + role: 'system', + content: options.systemPrompts.join('\n'), + }) + } + + // Convert messages + for (const message of options.messages) { + messages.push(this.convertMessageToGrok(message)) + } + + return { + model: options.model, + messages, + temperature: options.temperature, + max_tokens: options.maxTokens, + top_p: options.topP, + tools: tools as Array, + stream: true, + stream_options: { include_usage: true }, + } + } + + private convertMessageToGrok( + message: ModelMessage, + ): OpenAI_SDK.Chat.Completions.ChatCompletionMessageParam { + // Handle tool messages + if (message.role === 'tool') { + return { + role: 'tool', + tool_call_id: message.toolCallId || '', + content: + typeof message.content === 'string' + ? message.content + : JSON.stringify(message.content), + } + } + + // Handle assistant messages + if (message.role === 'assistant') { + const toolCalls = message.toolCalls?.map((tc) => ({ + id: tc.id, + type: 'function' as const, + function: { + name: tc.function.name, + arguments: + typeof tc.function.arguments === 'string' + ? tc.function.arguments + : JSON.stringify(tc.function.arguments), + }, + })) + + return { + role: 'assistant', + content: this.extractTextContent(message.content), + ...(toolCalls && toolCalls.length > 0 ? 
{ tool_calls: toolCalls } : {}), + } + } + + // Handle user messages - support multimodal content + const contentParts = this.normalizeContent(message.content) + + // If only text, use simple string format + if (contentParts.length === 1 && contentParts[0]?.type === 'text') { + return { + role: 'user', + content: contentParts[0].content, + } + } + + // Otherwise, use array format for multimodal + const parts: Array = + [] + for (const part of contentParts) { + if (part.type === 'text') { + parts.push({ type: 'text', text: part.content }) + } else if (part.type === 'image') { + const imageMetadata = part.metadata as GrokImageMetadata | undefined + parts.push({ + type: 'image_url', + image_url: { + url: part.source.value, + detail: imageMetadata?.detail || 'auto', + }, + }) + } + } + + return { + role: 'user', + content: parts.length > 0 ? parts : '', + } + } + + /** + * Normalizes message content to an array of ContentPart. + * Handles backward compatibility with string content. + */ + private normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] + } + if (typeof content === 'string') { + return [{ type: 'text', content: content }] + } + return content + } + + /** + * Extracts text content from a content value that may be string, null, or ContentPart array. + */ + private extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' + } + if (typeof content === 'string') { + return content + } + // It's an array of ContentPart + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') + } +} + +/** + * Creates a Grok text adapter with explicit API key. + * Type resolution happens here at the call site. 
+ * + * @param model - The model name (e.g., 'grok-3', 'grok-4') + * @param apiKey - Your xAI API key + * @param config - Optional additional configuration + * @returns Configured Grok text adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createGrokText('grok-3', "xai-..."); + * // adapter has type-safe providerOptions for grok-3 + * ``` + */ +export function createGrokText< + TModel extends (typeof GROK_CHAT_MODELS)[number], +>( + model: TModel, + apiKey: string, + config?: Omit, +): GrokTextAdapter { + return new GrokTextAdapter({ apiKey, ...config }, model) +} + +/** + * Creates a Grok text adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. + * + * Looks for `XAI_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'grok-3', 'grok-4') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured Grok text adapter instance with resolved types + * @throws Error if XAI_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses XAI_API_KEY from environment + * const adapter = grokText('grok-3'); + * + * const stream = chat({ + * adapter, + * messages: [{ role: "user", content: "Hello!" }] + * }); + * ``` + */ +export function grokText( + model: TModel, + config?: Omit, +): GrokTextAdapter { + const apiKey = getGrokApiKeyFromEnv() + return createGrokText(model, apiKey, config) +} diff --git a/packages/typescript/ai-grok/src/image/image-provider-options.ts b/packages/typescript/ai-grok/src/image/image-provider-options.ts new file mode 100644 index 00000000..9b0d9ee5 --- /dev/null +++ b/packages/typescript/ai-grok/src/image/image-provider-options.ts @@ -0,0 +1,118 @@ +/** + * Grok Image Generation Provider Options + * + * These are provider-specific options for Grok image generation. 
+ * Grok uses the grok-2-image-1212 model for image generation. + */ + +/** + * Supported sizes for grok-2-image-1212 model + */ +export type GrokImageSize = '1024x1024' | '1536x1024' | '1024x1536' + +/** + * Base provider options for Grok image models + */ +export interface GrokImageBaseProviderOptions { + /** + * A unique identifier representing your end-user. + * Can help xAI to monitor and detect abuse. + */ + user?: string +} + +/** + * Provider options for grok-2-image-1212 model + */ +export interface GrokImageProviderOptions extends GrokImageBaseProviderOptions { + /** + * The quality of the image. + * @default 'standard' + */ + quality?: 'standard' | 'hd' + + /** + * The format in which generated images are returned. + * URLs are only valid for 60 minutes after generation. + * @default 'url' + */ + response_format?: 'url' | 'b64_json' +} + +/** + * Type-only map from model name to its specific provider options. + */ +export type GrokImageModelProviderOptionsByName = { + 'grok-2-image-1212': GrokImageProviderOptions +} + +/** + * Type-only map from model name to its supported sizes. + */ +export type GrokImageModelSizeByName = { + 'grok-2-image-1212': GrokImageSize +} + +/** + * Internal options interface for validation + */ +interface ImageValidationOptions { + prompt: string + model: string +} + +/** + * Validates that the provided size is supported by the model. + * Throws a descriptive error if the size is not supported. + */ +export function validateImageSize( + model: string, + size: string | undefined, +): void { + if (!size) return + + const validSizes: Record> = { + 'grok-2-image-1212': ['1024x1024', '1536x1024', '1024x1536'], + } + + const modelSizes = validSizes[model] + if (!modelSizes) { + throw new Error(`Unknown image model: ${model}`) + } + + if (!modelSizes.includes(size)) { + throw new Error( + `Size "${size}" is not supported by model "${model}". 
` + + `Supported sizes: ${modelSizes.join(', ')}`, + ) + } +} + +/** + * Validates that the number of images is within bounds for the model. + */ +export function validateNumberOfImages( + _model: string, + numberOfImages: number | undefined, +): void { + if (numberOfImages === undefined) return + + // grok-2-image-1212 supports 1-10 images per request + if (numberOfImages < 1 || numberOfImages > 10) { + throw new Error( + `Number of images must be between 1 and 10. Requested: ${numberOfImages}`, + ) + } +} + +export const validatePrompt = (options: ImageValidationOptions) => { + if (options.prompt.length === 0) { + throw new Error('Prompt cannot be empty.') + } + // Grok image model supports up to 4000 characters + if (options.prompt.length > 4000) { + throw new Error( + 'For grok-2-image-1212, prompt length must be less than or equal to 4000 characters.', + ) + } +} diff --git a/packages/typescript/ai-grok/src/index.ts b/packages/typescript/ai-grok/src/index.ts new file mode 100644 index 00000000..1002a5dc --- /dev/null +++ b/packages/typescript/ai-grok/src/index.ts @@ -0,0 +1,55 @@ +// ============================================================================ +// New Tree-Shakeable Adapters (Recommended) +// ============================================================================ + +// Text (Chat) adapter - for chat/text completion +export { + GrokTextAdapter, + createGrokText, + grokText, + type GrokTextConfig, + type GrokTextProviderOptions, +} from './adapters/text' + +// Summarize adapter - for text summarization +export { + GrokSummarizeAdapter, + createGrokSummarize, + grokSummarize, + type GrokSummarizeConfig, + type GrokSummarizeProviderOptions, + type GrokSummarizeModel, +} from './adapters/summarize' + +// Image adapter - for image generation +export { + GrokImageAdapter, + createGrokImage, + grokImage, + type GrokImageConfig, + type GrokImageModel, +} from './adapters/image' +export type { + GrokImageProviderOptions, + 
GrokImageModelProviderOptionsByName, +} from './image/image-provider-options' + +// ============================================================================ +// Type Exports +// ============================================================================ + +export type { + GrokChatModelProviderOptionsByName, + GrokModelInputModalitiesByName, + ResolveProviderOptions, + ResolveInputModalities, +} from './model-meta' +export { GROK_CHAT_MODELS, GROK_IMAGE_MODELS } from './model-meta' +export type { + GrokTextMetadata, + GrokImageMetadata, + GrokAudioMetadata, + GrokVideoMetadata, + GrokDocumentMetadata, + GrokMessageMetadataByModality, +} from './message-types' diff --git a/packages/typescript/ai-grok/src/message-types.ts b/packages/typescript/ai-grok/src/message-types.ts new file mode 100644 index 00000000..ec9e7b18 --- /dev/null +++ b/packages/typescript/ai-grok/src/message-types.ts @@ -0,0 +1,67 @@ +/** + * Grok-specific metadata types for multimodal content parts. + * These types extend the base ContentPart metadata with Grok-specific options. + * + * Grok uses an OpenAI-compatible API, so metadata types are similar to OpenAI. + * + * @see https://docs.x.ai + */ + +/** + * Metadata for Grok image content parts. + * Controls how the model processes and analyzes images. + */ +export interface GrokImageMetadata { + /** + * Controls how the model processes the image. + * - 'auto': Let the model decide based on image size and content + * - 'low': Use low resolution processing (faster, cheaper, less detail) + * - 'high': Use high resolution processing (slower, more expensive, more detail) + * + * @default 'auto' + */ + detail?: 'auto' | 'low' | 'high' +} + +/** + * Metadata for Grok audio content parts. + * Specifies the audio format for proper processing. + */ +export interface GrokAudioMetadata { + /** + * The format of the audio. + * Supported formats: mp3, wav, flac, etc. 
+ * @default 'mp3' + */ + format?: 'mp3' | 'wav' | 'flac' | 'ogg' | 'webm' | 'aac' +} + +/** + * Metadata for Grok video content parts. + * Note: Video support in Grok is limited; check current API capabilities. + */ +export interface GrokVideoMetadata {} + +/** + * Metadata for Grok document content parts. + * Note: Direct document support may vary; PDFs often need to be converted to images. + */ +export interface GrokDocumentMetadata {} + +/** + * Metadata for Grok text content parts. + * Currently no specific metadata options for text in Grok. + */ +export interface GrokTextMetadata {} + +/** + * Map of modality types to their Grok-specific metadata types. + * Used for type inference when constructing multimodal messages. + */ +export interface GrokMessageMetadataByModality { + text: GrokTextMetadata + image: GrokImageMetadata + audio: GrokAudioMetadata + video: GrokVideoMetadata + document: GrokDocumentMetadata +} diff --git a/packages/typescript/ai-grok/src/model-meta.ts b/packages/typescript/ai-grok/src/model-meta.ts new file mode 100644 index 00000000..fc89a098 --- /dev/null +++ b/packages/typescript/ai-grok/src/model-meta.ts @@ -0,0 +1,84 @@ +/** + * Grok Chat Models + * Based on xAI's available models as of 2025 + */ +export const GROK_CHAT_MODELS = [ + 'grok-4', + 'grok-3', + 'grok-3-mini', + 'grok-2-vision-1212', + 'grok-4-fast', + 'grok-4.1-fast', +] as const + +/** + * Grok Image Generation Models + */ +export const GROK_IMAGE_MODELS = ['grok-2-image-1212'] as const + +/** + * Type-only map from Grok chat model name to its supported input modalities. + * Used for type inference when constructing multimodal messages. 
+ */ +export type GrokModelInputModalitiesByName = { + // Text-only models + 'grok-4': readonly ['text'] + 'grok-3': readonly ['text'] + 'grok-3-mini': readonly ['text'] + 'grok-4-fast': readonly ['text'] + 'grok-4.1-fast': readonly ['text'] + // Vision-capable model (text + image) + 'grok-2-vision-1212': readonly ['text', 'image'] +} + +/** + * Type-only map from Grok chat model name to its provider options type. + * Since Grok uses OpenAI-compatible API, we can reuse OpenAI provider options patterns. + * For now, all models share the same provider options structure. + */ +export type GrokChatModelProviderOptionsByName = { + [K in (typeof GROK_CHAT_MODELS)[number]]: GrokProviderOptions +} + +/** + * Grok-specific provider options + * Based on OpenAI-compatible API options + */ +export interface GrokProviderOptions { + /** Temperature for response generation (0-2) */ + temperature?: number + /** Maximum tokens in the response */ + max_tokens?: number + /** Top-p sampling parameter */ + top_p?: number + /** Frequency penalty (-2.0 to 2.0) */ + frequency_penalty?: number + /** Presence penalty (-2.0 to 2.0) */ + presence_penalty?: number + /** Stop sequences */ + stop?: string | Array + /** A unique identifier representing your end-user */ + user?: string +} + +// =========================== +// Type Resolution Helpers +// =========================== + +/** + * Resolve provider options for a specific model. + * If the model has explicit options in the map, use those; otherwise use base options. + */ +export type ResolveProviderOptions = + TModel extends keyof GrokChatModelProviderOptionsByName + ? GrokChatModelProviderOptionsByName[TModel] + : GrokProviderOptions + +/** + * Resolve input modalities for a specific model. + * If the model has explicit modalities in the map, use those; otherwise use text only. + */ +export type ResolveInputModalities = + TModel extends keyof GrokModelInputModalitiesByName + ? 
GrokModelInputModalitiesByName[TModel] + : readonly ['text'] diff --git a/packages/typescript/ai-grok/src/text/text-provider-options.ts b/packages/typescript/ai-grok/src/text/text-provider-options.ts new file mode 100644 index 00000000..a05222ff --- /dev/null +++ b/packages/typescript/ai-grok/src/text/text-provider-options.ts @@ -0,0 +1,77 @@ +import type { FunctionTool } from '../tools/function-tool' + +/** + * Grok Text Provider Options + * + * Grok uses an OpenAI-compatible Chat Completions API. + * However, not all OpenAI features may be supported by Grok. + */ + +/** + * Base provider options for Grok text/chat models + */ +export interface GrokBaseOptions { + /** + * A unique identifier representing your end-user. + * Can help xAI to monitor and detect abuse. + */ + user?: string +} + +/** + * Grok-specific provider options for text/chat + * Based on OpenAI-compatible API options + */ +export interface GrokTextProviderOptions extends GrokBaseOptions { + /** + * Temperature for response generation (0-2) + * Higher values make output more random, lower values more focused + */ + temperature?: number + /** + * Top-p sampling parameter (0-1) + * Alternative to temperature, nucleus sampling + */ + top_p?: number + /** + * Maximum tokens in the response + */ + max_tokens?: number + /** + * Frequency penalty (-2.0 to 2.0) + */ + frequency_penalty?: number + /** + * Presence penalty (-2.0 to 2.0) + */ + presence_penalty?: number + /** + * Stop sequences + */ + stop?: string | Array +} + +/** + * Internal options interface for validation + * Used internally by the adapter + */ +export interface InternalTextProviderOptions extends GrokTextProviderOptions { + model: string + stream?: boolean + tools?: Array +} + +/** + * External provider options (what users pass in) + */ +export type ExternalTextProviderOptions = GrokTextProviderOptions + +/** + * Validates text provider options + */ +export function validateTextProviderOptions( + _options: InternalTextProviderOptions, 
+): void { + // Basic validation can be added here if needed + // For now, Grok API will handle validation +} diff --git a/packages/typescript/ai-grok/src/tools/function-tool.ts b/packages/typescript/ai-grok/src/tools/function-tool.ts new file mode 100644 index 00000000..646fb895 --- /dev/null +++ b/packages/typescript/ai-grok/src/tools/function-tool.ts @@ -0,0 +1,45 @@ +import { makeGrokStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' +import type OpenAI from 'openai' + +// Use Chat Completions API tool format (not Responses API) +export type FunctionTool = OpenAI.Chat.Completions.ChatCompletionTool + +/** + * Converts a standard Tool to Grok ChatCompletionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply Grok-specific transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + * + * This enables strict mode for all tools automatically. + */ +export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { + // Tool schemas are already converted to JSON Schema in the ai layer + // Apply Grok-specific transformations for strict mode + const inputSchema = (tool.inputSchema ?? 
{ + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + const jsonSchema = makeGrokStructuredOutputCompatible( + inputSchema, + inputSchema.required || [], + ) + + // Ensure additionalProperties is false for strict mode + jsonSchema.additionalProperties = false + + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, // Always use strict mode since our schema converter handles the requirements + }, + } satisfies FunctionTool +} diff --git a/packages/typescript/ai-grok/src/tools/index.ts b/packages/typescript/ai-grok/src/tools/index.ts new file mode 100644 index 00000000..c9033415 --- /dev/null +++ b/packages/typescript/ai-grok/src/tools/index.ts @@ -0,0 +1,5 @@ +export { + convertFunctionToolToAdapterFormat, + type FunctionTool, +} from './function-tool' +export { convertToolsToProviderFormat } from './tool-converter' diff --git a/packages/typescript/ai-grok/src/tools/tool-converter.ts b/packages/typescript/ai-grok/src/tools/tool-converter.ts new file mode 100644 index 00000000..969fdb72 --- /dev/null +++ b/packages/typescript/ai-grok/src/tools/tool-converter.ts @@ -0,0 +1,17 @@ +import { convertFunctionToolToAdapterFormat } from './function-tool' +import type { FunctionTool } from './function-tool' +import type { Tool } from '@tanstack/ai' + +/** + * Converts an array of standard Tools to Grok-specific format + * Grok uses OpenAI-compatible API, so we primarily support function tools + */ +export function convertToolsToProviderFormat( + tools: Array, +): Array { + return tools.map((tool) => { + // For Grok, all tools are converted as function tools + // Grok uses OpenAI-compatible API which primarily supports function tools + return convertFunctionToolToAdapterFormat(tool) + }) +} diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts new file mode 100644 index 00000000..2a559076 --- /dev/null +++ 
b/packages/typescript/ai-grok/src/utils/client.ts @@ -0,0 +1,45 @@ +import OpenAI_SDK from 'openai' + +export interface GrokClientConfig { + apiKey: string + baseURL?: string +} + +/** + * Creates a Grok SDK client instance using OpenAI SDK with xAI's base URL + */ +export function createGrokClient(config: GrokClientConfig): OpenAI_SDK { + return new OpenAI_SDK({ + apiKey: config.apiKey, + baseURL: config.baseURL || 'https://api.x.ai/v1', + }) +} + +/** + * Gets Grok API key from environment variables + * @throws Error if XAI_API_KEY is not found + */ +export function getGrokApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.XAI_API_KEY + + if (!key) { + throw new Error( + 'XAI_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', + ) + } + + return key +} + +/** + * Generates a unique ID with a prefix + */ +export function generateId(prefix: string): string { + return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` +} diff --git a/packages/typescript/ai-grok/src/utils/index.ts b/packages/typescript/ai-grok/src/utils/index.ts new file mode 100644 index 00000000..72c2f529 --- /dev/null +++ b/packages/typescript/ai-grok/src/utils/index.ts @@ -0,0 +1,10 @@ +export { + createGrokClient, + getGrokApiKeyFromEnv, + generateId, + type GrokClientConfig, +} from './client' +export { + makeGrokStructuredOutputCompatible, + transformNullsToUndefined, +} from './schema-converter' diff --git a/packages/typescript/ai-grok/src/utils/schema-converter.ts b/packages/typescript/ai-grok/src/utils/schema-converter.ts new file mode 100644 index 00000000..38c345e2 --- /dev/null +++ b/packages/typescript/ai-grok/src/utils/schema-converter.ts @@ -0,0 +1,110 @@ +/** + * Recursively transform null values to undefined in an object. 
+ * + * This is needed because Grok's structured output (via OpenAI-compatible API) requires all fields to be + * in the `required` array, with optional fields made nullable (type: ["string", "null"]). + * When Grok returns null for optional fields, we need to convert them back to + * undefined to match the original Zod schema expectations. + * + * @param obj - Object to transform + * @returns Object with nulls converted to undefined + */ +export function transformNullsToUndefined(obj: T): T { + if (obj === null) { + return undefined as unknown as T + } + + if (Array.isArray(obj)) { + return obj.map((item) => transformNullsToUndefined(item)) as unknown as T + } + + if (typeof obj === 'object') { + const result: Record = {} + for (const [key, value] of Object.entries(obj as Record)) { + const transformed = transformNullsToUndefined(value) + // Only include the key if the value is not undefined + // This makes { notes: null } become {} (field absent) instead of { notes: undefined } + if (transformed !== undefined) { + result[key] = transformed + } + } + return result as T + } + + return obj +} + +/** + * Transform a JSON schema to be compatible with Grok's structured output requirements (OpenAI-compatible). 
+ * Grok requires: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for objects + * + * @param schema - JSON schema to transform + * @param originalRequired - Original required array (to know which fields were optional) + * @returns Transformed schema compatible with Grok structured output + */ +export function makeGrokStructuredOutputCompatible( + schema: Record, + originalRequired: Array = [], +): Record { + const result = { ...schema } + + // Handle object types + if (result.type === 'object' && result.properties) { + const properties = { ...result.properties } + const allPropertyNames = Object.keys(properties) + + // Transform each property + for (const propName of allPropertyNames) { + const prop = properties[propName] + const wasOptional = !originalRequired.includes(propName) + + // Recursively transform nested objects/arrays + if (prop.type === 'object' && prop.properties) { + properties[propName] = makeGrokStructuredOutputCompatible( + prop, + prop.required || [], + ) + } else if (prop.type === 'array' && prop.items) { + properties[propName] = { + ...prop, + items: makeGrokStructuredOutputCompatible( + prop.items, + prop.items.required || [], + ), + } + } else if (wasOptional) { + // Make optional fields nullable by adding null to the type + if (prop.type && !Array.isArray(prop.type)) { + properties[propName] = { + ...prop, + type: [prop.type, 'null'], + } + } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { + properties[propName] = { + ...prop, + type: [...prop.type, 'null'], + } + } + } + } + + result.properties = properties + // ALL properties must be required for Grok structured output + result.required = allPropertyNames + // additionalProperties must be false + result.additionalProperties = false + } + + // Handle array types with object items + if (result.type === 'array' && result.items) { + result.items = 
makeGrokStructuredOutputCompatible( + result.items, + result.items.required || [], + ) + } + + return result +} diff --git a/packages/typescript/ai-grok/tests/grok-adapter.test.ts b/packages/typescript/ai-grok/tests/grok-adapter.test.ts new file mode 100644 index 00000000..2cc86797 --- /dev/null +++ b/packages/typescript/ai-grok/tests/grok-adapter.test.ts @@ -0,0 +1,221 @@ +import { describe, it, expect, beforeEach, vi } from 'vitest' +import { chat, type Tool, type StreamChunk } from '@tanstack/ai' +import { GrokTextAdapter } from '../src/adapters/text' + +const createAdapter = () => new GrokTextAdapter({ apiKey: 'test-key' }, 'grok-3') + +const toolArguments = JSON.stringify({ location: 'Berlin' }) + +const weatherTool: Tool = { + name: 'lookup_weather', + description: 'Return the forecast for a location', +} + +function createMockChatCompletionsStream( + chunks: Array>, +): AsyncIterable> { + return { + async *[Symbol.asyncIterator]() { + for (const chunk of chunks) { + yield chunk + } + }, + } +} + +describe('Grok adapter option mapping', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('maps options into the Chat Completions API payload', async () => { + // Mock the Chat Completions API stream format + const mockStream = createMockChatCompletionsStream([ + { + id: 'chatcmpl-123', + model: 'grok-3', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + content: 'It is sunny', + }, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'grok-3', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 12, + completion_tokens: 4, + total_tokens: 16, + }, + }, + ]) + + const chatCompletionsCreate = vi.fn().mockResolvedValueOnce(mockStream) + + const adapter = createAdapter() + // Replace the internal OpenAI SDK client with our mock + ;(adapter as any).client = { + chat: { + completions: { + create: chatCompletionsCreate, + }, + }, + } + + const chunks: StreamChunk[] = [] + for await (const chunk of 
chat({ + adapter, + messages: [ + { role: 'user', content: 'How is the weather?' }, + { + role: 'assistant', + content: 'Let me check', + toolCalls: [ + { + id: 'call_weather', + type: 'function', + function: { name: 'lookup_weather', arguments: toolArguments }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_weather', content: '{"temp":72}' }, + ], + systemPrompts: ['Stay concise'], + tools: [weatherTool], + temperature: 0.25, + topP: 0.6, + maxTokens: 1024, + })) { + chunks.push(chunk) + } + + expect(chatCompletionsCreate).toHaveBeenCalledTimes(1) + const [payload] = chatCompletionsCreate.mock.calls[0] + + // Chat Completions API format + expect(payload).toMatchObject({ + model: 'grok-3', + temperature: 0.25, + top_p: 0.6, + max_tokens: 1024, + stream: true, + }) + + // Chat Completions API uses 'messages' array + expect(payload.messages).toBeDefined() + expect(Array.isArray(payload.messages)).toBe(true) + + // Verify tools are included + expect(payload.tools).toBeDefined() + expect(Array.isArray(payload.tools)).toBe(true) + expect(payload.tools.length).toBeGreaterThan(0) + }) + + it('handles tool calls in streaming response', async () => { + // Mock the Chat Completions API stream with tool calls + const mockStream = createMockChatCompletionsStream([ + { + id: 'chatcmpl-123', + model: 'grok-3', + choices: [ + { + index: 0, + delta: { + role: 'assistant', + content: null, + tool_calls: [ + { + index: 0, + id: 'call_123', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '', + }, + }, + ], + }, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'grok-3', + choices: [ + { + index: 0, + delta: { + tool_calls: [ + { + index: 0, + function: { + arguments: '{"location": "Berlin"}', + }, + }, + ], + }, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'grok-3', + choices: [ + { + index: 0, + delta: {}, + finish_reason: 'tool_calls', + }, + ], + usage: { + prompt_tokens: 12, + completion_tokens: 8, + total_tokens: 20, + }, + }, + ]) + + const 
chatCompletionsCreate = vi.fn().mockResolvedValueOnce(mockStream) + + const adapter = createAdapter() + ;(adapter as any).client = { + chat: { + completions: { + create: chatCompletionsCreate, + }, + }, + } + + const chunks: StreamChunk[] = [] + for await (const chunk of chat({ + adapter, + messages: [{ role: 'user', content: 'How is the weather in Berlin?' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + // Verify we got a tool_call chunk + const toolCallChunks = chunks.filter((c) => c.type === 'tool_call') + expect(toolCallChunks.length).toBeGreaterThan(0) + + // Verify done chunk has tool_calls finish reason + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + if (doneChunk && doneChunk.type === 'done') { + expect(doneChunk.finishReason).toBe('tool_calls') + } + }) +}) diff --git a/packages/typescript/ai-grok/tsconfig.json b/packages/typescript/ai-grok/tsconfig.json new file mode 100644 index 00000000..ea11c109 --- /dev/null +++ b/packages/typescript/ai-grok/tsconfig.json @@ -0,0 +1,9 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src" + }, + "include": ["src/**/*.ts", "src/**/*.tsx"], + "exclude": ["node_modules", "dist", "**/*.config.ts"] +} diff --git a/packages/typescript/ai-grok/vite.config.ts b/packages/typescript/ai-grok/vite.config.ts new file mode 100644 index 00000000..77bcc2e6 --- /dev/null +++ b/packages/typescript/ai-grok/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + 
'**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git a/packages/typescript/smoke-tests/adapters/package.json b/packages/typescript/smoke-tests/adapters/package.json index 17f686eb..67654826 100644 --- a/packages/typescript/smoke-tests/adapters/package.json +++ b/packages/typescript/smoke-tests/adapters/package.json @@ -14,6 +14,7 @@ "@tanstack/ai": "workspace:*", "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-gemini": "workspace:*", + "@tanstack/ai-grok": "workspace:*", "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", "commander": "^13.1.0" diff --git a/packages/typescript/smoke-tests/adapters/src/adapters/index.ts b/packages/typescript/smoke-tests/adapters/src/adapters/index.ts index 91efe0a2..27ab1fbb 100644 --- a/packages/typescript/smoke-tests/adapters/src/adapters/index.ts +++ b/packages/typescript/smoke-tests/adapters/src/adapters/index.ts @@ -5,6 +5,7 @@ import { geminiSummarize, geminiText, } from '@tanstack/ai-gemini' +import { grokImage, grokSummarize, grokText } from '@tanstack/ai-grok' import { ollamaSummarize, ollamaText } from '@tanstack/ai-ollama' import { openaiImage, @@ -77,6 +78,10 @@ const GEMINI_TTS_MODEL = const OLLAMA_MODEL = process.env.OLLAMA_MODEL || 'mistral:7b' const OLLAMA_SUMMARY_MODEL = process.env.OLLAMA_SUMMARY_MODEL || OLLAMA_MODEL +const GROK_MODEL = process.env.GROK_MODEL || 'grok-3' +const GROK_SUMMARY_MODEL = process.env.GROK_SUMMARY_MODEL || GROK_MODEL +const GROK_IMAGE_MODEL = process.env.GROK_IMAGE_MODEL || 'grok-2-image-1212' + /** * Create Anthropic adapters */ @@ -160,6 +165,23 @@ function createOllamaAdapters(): AdapterSet | null { } } +/** + * Create Grok adapters + */ +function createGrokAdapters(): AdapterSet | null { + const apiKey = process.env.XAI_API_KEY + if (!apiKey) return null + + return { + textAdapter: 
grokText(GROK_MODEL as any, { apiKey } as any), + summarizeAdapter: grokSummarize(GROK_SUMMARY_MODEL as any, { apiKey } as any), + imageAdapter: grokImage(GROK_IMAGE_MODEL as any, { apiKey } as any), + chatModel: GROK_MODEL, + summarizeModel: GROK_SUMMARY_MODEL, + imageModel: GROK_IMAGE_MODEL, + } +} + /** * Registry of all available adapters */ @@ -189,6 +211,12 @@ export const ADAPTERS: Array = [ envKey: null, create: createOllamaAdapters, }, + { + id: 'grok', + name: 'Grok', + envKey: 'XAI_API_KEY', + create: createGrokAdapters, + }, ] /** diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c3ed56e5..ff102908 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -201,6 +201,9 @@ importers: '@tanstack/ai-gemini': specifier: workspace:* version: link:../../packages/typescript/ai-gemini + '@tanstack/ai-grok': + specifier: workspace:* + version: link:../../packages/typescript/ai-grok '@tanstack/ai-ollama': specifier: workspace:* version: link:../../packages/typescript/ai-ollama @@ -695,6 +698,25 @@ importers: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-grok: + dependencies: + '@tanstack/ai': + specifier: workspace:^ + version: link:../ai + openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.18.3)(zod@4.2.1) + zod: + specifier: ^4.0.0 + version: 4.2.1 + devDependencies: + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.15(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-ollama: dependencies: '@tanstack/ai': @@ -1012,6 +1034,9 @@ importers: '@tanstack/ai-gemini': specifier: workspace:* version: link:../../ai-gemini + '@tanstack/ai-grok': + specifier: workspace:* + version: link:../../ai-grok 
'@tanstack/ai-ollama': specifier: workspace:* version: link:../../ai-ollama @@ -1144,6 +1169,9 @@ importers: '@tanstack/ai-gemini': specifier: workspace:* version: link:../../packages/typescript/ai-gemini + '@tanstack/ai-grok': + specifier: workspace:* + version: link:../../packages/typescript/ai-grok '@tanstack/ai-ollama': specifier: workspace:* version: link:../../packages/typescript/ai-ollama @@ -13736,6 +13764,11 @@ snapshots: ws: 8.18.3 zod: 4.1.13 + openai@6.10.0(ws@8.18.3)(zod@4.2.1): + optionalDependencies: + ws: 8.18.3 + zod: 4.2.1 + optionator@0.9.4: dependencies: deep-is: 0.1.4 diff --git a/testing/panel/package.json b/testing/panel/package.json index 2ae153c3..f5d6863c 100644 --- a/testing/panel/package.json +++ b/testing/panel/package.json @@ -13,6 +13,7 @@ "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-client": "workspace:*", "@tanstack/ai-gemini": "workspace:*", + "@tanstack/ai-grok": "workspace:*", "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-react": "workspace:*", diff --git a/testing/panel/src/lib/model-selection.ts b/testing/panel/src/lib/model-selection.ts index 0412d275..4d40ccc7 100644 --- a/testing/panel/src/lib/model-selection.ts +++ b/testing/panel/src/lib/model-selection.ts @@ -1,4 +1,4 @@ -export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'grok' export interface ModelOption { provider: Provider @@ -67,6 +67,23 @@ export const MODEL_OPTIONS: Array = [ model: 'smollm', label: 'Ollama - SmolLM', }, + + // Grok + { + provider: 'grok', + model: 'grok-3', + label: 'Grok - Grok 3', + }, + { + provider: 'grok', + model: 'grok-3-mini', + label: 'Grok - Grok 3 Mini', + }, + { + provider: 'grok', + model: 'grok-2-vision-1212', + label: 'Grok - Grok 2 Vision', + }, ] const STORAGE_KEY = 'tanstack-ai-model-preference' diff --git a/testing/panel/src/routes/api.chat.ts b/testing/panel/src/routes/api.chat.ts index 
6063c574..3f2a95d1 100644 --- a/testing/panel/src/routes/api.chat.ts +++ b/testing/panel/src/routes/api.chat.ts @@ -9,6 +9,7 @@ import { } from '@tanstack/ai' import { anthropicText } from '@tanstack/ai-anthropic' import { geminiText } from '@tanstack/ai-gemini' +import { grokText } from '@tanstack/ai-grok' import { openaiText } from '@tanstack/ai-openai' import { ollamaText } from '@tanstack/ai-ollama' import type { AIAdapter, StreamChunk } from '@tanstack/ai' @@ -51,7 +52,7 @@ const addToCartToolServer = addToCartToolDef.server((args) => ({ totalItems: args.quantity, })) -type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' +type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'grok' /** * Wraps an adapter to intercept chatStream and record raw chunks from the adapter @@ -172,6 +173,10 @@ export const Route = createFileRoute('/api/chat')({ createChatOptions({ adapter: geminiText((model || 'gemini-2.0-flash') as any), }), + grok: () => + createChatOptions({ + adapter: grokText((model || 'grok-3') as any), + }), ollama: () => createChatOptions({ adapter: ollamaText((model || 'mistral:7b') as any), From 43e1ddfc07b61383289c304836f31baafe8f34c9 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 23 Dec 2025 14:47:48 +0000 Subject: [PATCH 2/9] ci: apply automated fixes --- packages/typescript/ai-grok/src/adapters/summarize.ts | 6 +++++- packages/typescript/ai-grok/src/adapters/text.ts | 6 +++++- packages/typescript/ai-grok/tests/grok-adapter.test.ts | 3 ++- .../typescript/smoke-tests/adapters/src/adapters/index.ts | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts index 1115732d..5cd273f0 100644 --- a/packages/typescript/ai-grok/src/adapters/summarize.ts +++ b/packages/typescript/ai-grok/src/adapters/summarize.ts @@ -2,7 +2,11 @@ import { BaseSummarizeAdapter } 
from '@tanstack/ai/adapters' import { getGrokApiKeyFromEnv } from '../utils' import { GrokTextAdapter } from './text' import type { GROK_CHAT_MODELS } from '../model-meta' -import type { StreamChunk, SummarizationOptions, SummarizationResult } from '@tanstack/ai' +import type { + StreamChunk, + SummarizationOptions, + SummarizationResult, +} from '@tanstack/ai' import type { GrokClientConfig } from '../utils' /** diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index 6999d955..bef2ffaf 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -8,7 +8,11 @@ import { makeGrokStructuredOutputCompatible, transformNullsToUndefined, } from '../utils' -import type { GROK_CHAT_MODELS, ResolveInputModalities, ResolveProviderOptions } from '../model-meta' +import type { + GROK_CHAT_MODELS, + ResolveInputModalities, + ResolveProviderOptions, +} from '../model-meta' import type { StructuredOutputOptions, StructuredOutputResult, diff --git a/packages/typescript/ai-grok/tests/grok-adapter.test.ts b/packages/typescript/ai-grok/tests/grok-adapter.test.ts index 2cc86797..4730a0f1 100644 --- a/packages/typescript/ai-grok/tests/grok-adapter.test.ts +++ b/packages/typescript/ai-grok/tests/grok-adapter.test.ts @@ -2,7 +2,8 @@ import { describe, it, expect, beforeEach, vi } from 'vitest' import { chat, type Tool, type StreamChunk } from '@tanstack/ai' import { GrokTextAdapter } from '../src/adapters/text' -const createAdapter = () => new GrokTextAdapter({ apiKey: 'test-key' }, 'grok-3') +const createAdapter = () => + new GrokTextAdapter({ apiKey: 'test-key' }, 'grok-3') const toolArguments = JSON.stringify({ location: 'Berlin' }) diff --git a/packages/typescript/smoke-tests/adapters/src/adapters/index.ts b/packages/typescript/smoke-tests/adapters/src/adapters/index.ts index 27ab1fbb..4eb9b883 100644 --- 
a/packages/typescript/smoke-tests/adapters/src/adapters/index.ts +++ b/packages/typescript/smoke-tests/adapters/src/adapters/index.ts @@ -174,7 +174,10 @@ function createGrokAdapters(): AdapterSet | null { return { textAdapter: grokText(GROK_MODEL as any, { apiKey } as any), - summarizeAdapter: grokSummarize(GROK_SUMMARY_MODEL as any, { apiKey } as any), + summarizeAdapter: grokSummarize( + GROK_SUMMARY_MODEL as any, + { apiKey } as any, + ), imageAdapter: grokImage(GROK_IMAGE_MODEL as any, { apiKey } as any), chatModel: GROK_MODEL, summarizeModel: GROK_SUMMARY_MODEL, From 9ded201c6b2a61f97b734c8b254cc58e3df36285 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 23 Dec 2025 18:09:05 +0100 Subject: [PATCH 3/9] fix grok model issues --- .../typescript/ai-grok/src/adapters/image.ts | 15 +- .../typescript/ai-grok/src/adapters/text.ts | 11 +- .../src/image/image-provider-options.ts | 60 +--- packages/typescript/ai-grok/src/model-meta.ts | 290 ++++++++++++++++-- .../ai-grok/src/text/text-provider-options.ts | 24 -- 5 files changed, 281 insertions(+), 119 deletions(-) diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts index beb5dd18..b92cd24a 100644 --- a/packages/typescript/ai-grok/src/adapters/image.ts +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -1,16 +1,15 @@ import { BaseImageAdapter } from '@tanstack/ai/adapters' import { createGrokClient, generateId, getGrokApiKeyFromEnv } from '../utils' import { - validateImageSize, validateNumberOfImages, validatePrompt, } from '../image/image-provider-options' -import type { GROK_IMAGE_MODELS } from '../model-meta' import type { + GROK_IMAGE_MODELS, GrokImageModelProviderOptionsByName, GrokImageModelSizeByName, - GrokImageProviderOptions, -} from '../image/image-provider-options' +} from '../model-meta' +import type { GrokImageProviderOptions } from '../image/image-provider-options' import type { GeneratedImage, ImageGenerationOptions, @@ -59,11 
+58,11 @@ export class GrokImageAdapter< async generateImages( options: ImageGenerationOptions, ): Promise { - const { model, prompt, numberOfImages, size } = options + const { model, prompt, numberOfImages } = options // Validate inputs validatePrompt({ prompt, model }) - validateImageSize(model, size) + validateNumberOfImages(model, numberOfImages) // Build request based on model type @@ -80,13 +79,13 @@ export class GrokImageAdapter< private buildRequest( options: ImageGenerationOptions, ): OpenAI_SDK.Images.ImageGenerateParams { - const { model, prompt, numberOfImages, size, modelOptions } = options + const { model, prompt, numberOfImages, modelOptions } = options return { model, prompt, n: numberOfImages ?? 1, - size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], + ...modelOptions, } } diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index bef2ffaf..24390525 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -1,5 +1,4 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters' -import { validateTextProviderOptions } from '../text/text-provider-options' import { convertToolsToProviderFormat } from '../tools' import { createGrokClient, @@ -301,13 +300,6 @@ export class GrokTextAdapter< > | undefined - if (modelOptions) { - validateTextProviderOptions({ - ...modelOptions, - model: options.model, - }) - } - const tools = options.tools ? 
convertToolsToProviderFormat(options.tools) : undefined @@ -336,8 +328,9 @@ export class GrokTextAdapter< max_tokens: options.maxTokens, top_p: options.topP, tools: tools as Array, - stream: true, stream_options: { include_usage: true }, + ...modelOptions, + stream: true, } } diff --git a/packages/typescript/ai-grok/src/image/image-provider-options.ts b/packages/typescript/ai-grok/src/image/image-provider-options.ts index 9b0d9ee5..87805940 100644 --- a/packages/typescript/ai-grok/src/image/image-provider-options.ts +++ b/packages/typescript/ai-grok/src/image/image-provider-options.ts @@ -1,15 +1,3 @@ -/** - * Grok Image Generation Provider Options - * - * These are provider-specific options for Grok image generation. - * Grok uses the grok-2-image-1212 model for image generation. - */ - -/** - * Supported sizes for grok-2-image-1212 model - */ -export type GrokImageSize = '1024x1024' | '1536x1024' | '1024x1536' - /** * Base provider options for Grok image models */ @@ -27,9 +15,8 @@ export interface GrokImageBaseProviderOptions { export interface GrokImageProviderOptions extends GrokImageBaseProviderOptions { /** * The quality of the image. - * @default 'standard' */ - quality?: 'standard' | 'hd' + quality?: 'low' | 'medium' | 'high' /** * The format in which generated images are returned. @@ -39,20 +26,6 @@ export interface GrokImageProviderOptions extends GrokImageBaseProviderOptions { response_format?: 'url' | 'b64_json' } -/** - * Type-only map from model name to its specific provider options. - */ -export type GrokImageModelProviderOptionsByName = { - 'grok-2-image-1212': GrokImageProviderOptions -} - -/** - * Type-only map from model name to its supported sizes. - */ -export type GrokImageModelSizeByName = { - 'grok-2-image-1212': GrokImageSize -} - /** * Internal options interface for validation */ @@ -61,33 +34,6 @@ interface ImageValidationOptions { model: string } -/** - * Validates that the provided size is supported by the model. 
- * Throws a descriptive error if the size is not supported. - */ -export function validateImageSize( - model: string, - size: string | undefined, -): void { - if (!size) return - - const validSizes: Record> = { - 'grok-2-image-1212': ['1024x1024', '1536x1024', '1024x1536'], - } - - const modelSizes = validSizes[model] - if (!modelSizes) { - throw new Error(`Unknown image model: ${model}`) - } - - if (!modelSizes.includes(size)) { - throw new Error( - `Size "${size}" is not supported by model "${model}". ` + - `Supported sizes: ${modelSizes.join(', ')}`, - ) - } -} - /** * Validates that the number of images is within bounds for the model. */ @@ -97,7 +43,7 @@ export function validateNumberOfImages( ): void { if (numberOfImages === undefined) return - // grok-2-image-1212 supports 1-10 images per request + // grok-2-image supports 1-10 images per request if (numberOfImages < 1 || numberOfImages > 10) { throw new Error( `Number of images must be between 1 and 10. Requested: ${numberOfImages}`, @@ -112,7 +58,7 @@ export const validatePrompt = (options: ImageValidationOptions) => { // Grok image model supports up to 4000 characters if (options.prompt.length > 4000) { throw new Error( - 'For grok-2-image-1212, prompt length must be less than or equal to 4000 characters.', + 'For grok-2-image, prompt length must be less than or equal to 4000 characters.', ) } } diff --git a/packages/typescript/ai-grok/src/model-meta.ts b/packages/typescript/ai-grok/src/model-meta.ts index fc89a098..20c44aff 100644 --- a/packages/typescript/ai-grok/src/model-meta.ts +++ b/packages/typescript/ai-grok/src/model-meta.ts @@ -1,20 +1,237 @@ +import type { GrokTextProviderOptions } from './adapters/text' +import type { GrokImageProviderOptions } from './image/image-provider-options' + +interface ModelMeta { + name: string + supports: { + input: Array<'text' | 'image' | 'audio' | 'video' | 'document'> + output: Array<'text' | 'image' | 'audio' | 'video'> + capabilities?: Array<'reasoning' | 
'tool_calling' | 'structured_outputs'> + } + max_input_tokens?: number + max_output_tokens?: number + context_window?: number + knowledge_cutoff?: string + pricing?: { + input: { + normal: number + cached?: number + } + output: { + normal: number + } + } + /** + * Type-level description of which provider options this model supports. + */ + providerOptions?: TProviderOptions +} + +const GROK_4_1_FAST_REASONING = { + name: 'grok-4.1-fast-reasoning', + context_window: 2_000_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_4_1_FAST_NON_REASONING = { + name: 'grok-4.1-fast-non-reasoning', + context_window: 2_000_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_CODE_FAST_1 = { + name: 'grok-code-fast-1', + context_window: 256_000, + supports: { + input: ['text'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.02, + }, + output: { + normal: 1.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_4_FAST_REASONING = { + name: 'grok-4-fast-reasoning', + context_window: 2_000_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_4_FAST_NON_REASONING = { + name: 'grok-4-fast-non-reasoning', + context_window: 2_000_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: 
['structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_4_0709 = { + name: 'grok-4-0709', + context_window: 256_000, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 3, + cached: 0.75, + }, + output: { + normal: 15, + }, + }, +} as const satisfies ModelMeta + +const GROK_3_MINI = { + name: 'grok-3-mini', + context_window: 131_072, + supports: { + input: ['text'], + output: ['text'], + capabilities: ['reasoning', 'structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 0.3, + cached: 0.075, + }, + output: { + normal: 0.5, + }, + }, +} as const satisfies ModelMeta + +const GROK_3 = { + name: 'grok-3', + context_window: 131_072, + supports: { + input: ['text'], + output: ['text'], + capabilities: ['structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 3, + cached: 0.75, + }, + output: { + normal: 15, + }, + }, +} as const satisfies ModelMeta + +const GROK_2_VISION = { + name: 'grok-2-vision-1212', + context_window: 32_768, + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['structured_outputs', 'tool_calling'], + }, + pricing: { + input: { + normal: 2, + }, + output: { + normal: 10, + }, + }, +} as const satisfies ModelMeta + +const GROK_2_IMAGE = { + name: 'grok-2-image-1212', + supports: { + input: ['text'], + output: ['text'], + }, + pricing: { + input: { + normal: 0.07, + }, + output: { + normal: 0.07, + }, + }, +} as const satisfies ModelMeta /** * Grok Chat Models * Based on xAI's available models as of 2025 */ export const GROK_CHAT_MODELS = [ - 'grok-4', - 'grok-3', - 'grok-3-mini', - 'grok-2-vision-1212', - 'grok-4-fast', - 'grok-4.1-fast', + GROK_4_1_FAST_REASONING.name, + GROK_4_1_FAST_NON_REASONING.name, + GROK_CODE_FAST_1.name, + 
GROK_4_FAST_REASONING.name, + GROK_4_FAST_NON_REASONING.name, + GROK_4_0709.name, + GROK_3.name, + GROK_3_MINI.name, + GROK_2_VISION.name, ] as const /** * Grok Image Generation Models */ -export const GROK_IMAGE_MODELS = ['grok-2-image-1212'] as const +export const GROK_IMAGE_MODELS = [GROK_2_IMAGE.name] as const /** * Type-only map from Grok chat model name to its supported input modalities. @@ -22,13 +239,16 @@ export const GROK_IMAGE_MODELS = ['grok-2-image-1212'] as const */ export type GrokModelInputModalitiesByName = { // Text-only models - 'grok-4': readonly ['text'] - 'grok-3': readonly ['text'] - 'grok-3-mini': readonly ['text'] - 'grok-4-fast': readonly ['text'] - 'grok-4.1-fast': readonly ['text'] - // Vision-capable model (text + image) - 'grok-2-vision-1212': readonly ['text', 'image'] + [GROK_4_1_FAST_REASONING.name]: typeof GROK_4_1_FAST_REASONING.supports.input + [GROK_4_1_FAST_NON_REASONING.name]: typeof GROK_4_1_FAST_NON_REASONING.supports.input + [GROK_CODE_FAST_1.name]: typeof GROK_CODE_FAST_1.supports.input + [GROK_4_FAST_REASONING.name]: typeof GROK_4_FAST_REASONING.supports.input + [GROK_4_FAST_NON_REASONING.name]: typeof GROK_4_FAST_NON_REASONING.supports.input + [GROK_4_0709.name]: typeof GROK_4_0709.supports.input + [GROK_3.name]: typeof GROK_3.supports.input + [GROK_3_MINI.name]: typeof GROK_3_MINI.supports.input + // Multimodal model + [GROK_2_VISION.name]: typeof GROK_2_VISION.supports.input } /** @@ -37,20 +257,34 @@ export type GrokModelInputModalitiesByName = { * For now, all models share the same provider options structure. 
*/ export type GrokChatModelProviderOptionsByName = { - [K in (typeof GROK_CHAT_MODELS)[number]]: GrokProviderOptions + [GROK_4_1_FAST_REASONING.name]: GrokTextProviderOptions & ReasoningOptions + [GROK_4_1_FAST_NON_REASONING.name]: GrokTextProviderOptions + [GROK_CODE_FAST_1.name]: GrokTextProviderOptions & ReasoningOptions + [GROK_4_FAST_REASONING.name]: GrokTextProviderOptions & ReasoningOptions + [GROK_4_FAST_NON_REASONING.name]: GrokTextProviderOptions + [GROK_4_0709.name]: GrokTextProviderOptions & ReasoningOptions + [GROK_3.name]: GrokTextProviderOptions + [GROK_3_MINI.name]: GrokTextProviderOptions & ReasoningOptions + [GROK_2_VISION.name]: GrokTextProviderOptions } +type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' +type ReasoningSummary = 'auto' | 'detailed' +/** + * Reasoning options for most models (excludes 'concise' summary). + */ +export interface ReasoningOptions { + reasoning?: { + effort?: ReasoningEffort + + summary?: ReasoningSummary + } +} /** * Grok-specific provider options * Based on OpenAI-compatible API options */ export interface GrokProviderOptions { - /** Temperature for response generation (0-2) */ - temperature?: number - /** Maximum tokens in the response */ - max_tokens?: number - /** Top-p sampling parameter */ - top_p?: number /** Frequency penalty (-2.0 to 2.0) */ frequency_penalty?: number /** Presence penalty (-2.0 to 2.0) */ @@ -82,3 +316,17 @@ export type ResolveInputModalities = TModel extends keyof GrokModelInputModalitiesByName ? GrokModelInputModalitiesByName[TModel] : readonly ['text'] + +/** + * Type-only map from model name to its specific provider options. + */ +export type GrokImageModelProviderOptionsByName = { + [GROK_2_IMAGE.name]: GrokImageProviderOptions +} + +/** + * Type-only map from model name to its supported sizes. 
+ */ +export type GrokImageModelSizeByName = { + [GROK_2_IMAGE.name]: never +} diff --git a/packages/typescript/ai-grok/src/text/text-provider-options.ts b/packages/typescript/ai-grok/src/text/text-provider-options.ts index a05222ff..ab2aac14 100644 --- a/packages/typescript/ai-grok/src/text/text-provider-options.ts +++ b/packages/typescript/ai-grok/src/text/text-provider-options.ts @@ -23,20 +23,6 @@ export interface GrokBaseOptions { * Based on OpenAI-compatible API options */ export interface GrokTextProviderOptions extends GrokBaseOptions { - /** - * Temperature for response generation (0-2) - * Higher values make output more random, lower values more focused - */ - temperature?: number - /** - * Top-p sampling parameter (0-1) - * Alternative to temperature, nucleus sampling - */ - top_p?: number - /** - * Maximum tokens in the response - */ - max_tokens?: number /** * Frequency penalty (-2.0 to 2.0) */ @@ -65,13 +51,3 @@ export interface InternalTextProviderOptions extends GrokTextProviderOptions { * External provider options (what users pass in) */ export type ExternalTextProviderOptions = GrokTextProviderOptions - -/** - * Validates text provider options - */ -export function validateTextProviderOptions( - _options: InternalTextProviderOptions, -): void { - // Basic validation can be added here if needed - // For now, Grok API will handle validation -} From f8469a165df058bd0d0e3d63add267cfe2a4028b Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 23 Dec 2025 18:12:44 +0100 Subject: [PATCH 4/9] fix grok model issues --- packages/typescript/ai-grok/src/model-meta.ts | 15 ++--------- .../ai-grok/src/text/text-provider-options.ts | 27 +++++++++---------- 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/packages/typescript/ai-grok/src/model-meta.ts b/packages/typescript/ai-grok/src/model-meta.ts index 20c44aff..b2423e3a 100644 --- a/packages/typescript/ai-grok/src/model-meta.ts +++ b/packages/typescript/ai-grok/src/model-meta.ts @@ -1,3 +1,4 @@ 
+import type { ReasoningOptions } from './text/text-provider-options' import type { GrokTextProviderOptions } from './adapters/text' import type { GrokImageProviderOptions } from './image/image-provider-options' @@ -267,19 +268,7 @@ export type GrokChatModelProviderOptionsByName = { [GROK_3_MINI.name]: GrokTextProviderOptions & ReasoningOptions [GROK_2_VISION.name]: GrokTextProviderOptions } -type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' -type ReasoningSummary = 'auto' | 'detailed' -/** - * Reasoning options for most models (excludes 'concise' summary). - */ -export interface ReasoningOptions { - reasoning?: { - effort?: ReasoningEffort - - summary?: ReasoningSummary - } -} /** * Grok-specific provider options * Based on OpenAI-compatible API options @@ -306,7 +295,7 @@ export interface GrokProviderOptions { export type ResolveProviderOptions = TModel extends keyof GrokChatModelProviderOptionsByName ? GrokChatModelProviderOptionsByName[TModel] - : GrokProviderOptions + : GrokTextProviderOptions /** * Resolve input modalities for a specific model. 
diff --git a/packages/typescript/ai-grok/src/text/text-provider-options.ts b/packages/typescript/ai-grok/src/text/text-provider-options.ts index ab2aac14..510aa6ab 100644 --- a/packages/typescript/ai-grok/src/text/text-provider-options.ts +++ b/packages/typescript/ai-grok/src/text/text-provider-options.ts @@ -22,21 +22,20 @@ export interface GrokBaseOptions { * Grok-specific provider options for text/chat * Based on OpenAI-compatible API options */ -export interface GrokTextProviderOptions extends GrokBaseOptions { - /** - * Frequency penalty (-2.0 to 2.0) - */ - frequency_penalty?: number - /** - * Presence penalty (-2.0 to 2.0) - */ - presence_penalty?: number - /** - * Stop sequences - */ - stop?: string | Array -} +export interface GrokTextProviderOptions extends GrokBaseOptions {} +type ReasoningEffort = 'low' | 'high' +type ReasoningSummary = 'auto' | 'detailed' +/** + * Reasoning options for most models (excludes 'concise' summary). + */ +export interface ReasoningOptions { + reasoning?: { + effort?: ReasoningEffort + + summary?: ReasoningSummary + } +} /** * Internal options interface for validation * Used internally by the adapter From 10a3f123b87617a1f88b8bca526247552831bfd2 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 23 Dec 2025 18:22:50 +0100 Subject: [PATCH 5/9] fix grok model issues --- .../ai-grok/src/text/text-provider-options.ts | 38 ++++++++++++++++++- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai-grok/src/text/text-provider-options.ts b/packages/typescript/ai-grok/src/text/text-provider-options.ts index 510aa6ab..abfd837a 100644 --- a/packages/typescript/ai-grok/src/text/text-provider-options.ts +++ b/packages/typescript/ai-grok/src/text/text-provider-options.ts @@ -16,6 +16,40 @@ export interface GrokBaseOptions { * Can help xAI to monitor and detect abuse. */ user?: string + /** + * What additional output data to include in the response. 
Currently the only supported value is +reasoning.encrypted_content + which returns an encrypted version of the reasoning tokens. + */ + include?: Array<'reasoning.encrypted_content'> + /** + * Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. + */ + logprobs?: boolean + /** + * Whether to enable parallel tool calls. + * @default true + */ + parallel_tool_calls?: boolean + + tool_choice?: + | 'none' + | 'auto' + | 'required' + | { type: 'function'; function: { [key: string]: string } } + /** + * The ID of the previous response from the model. + */ + previous_response_id?: string + /** + * Whether to store the input message(s) and model response for later retrieval. + * @default true + */ + store?: boolean + /** + * An integer between 0 and 8 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used. + */ + top_logprobs?: number } /** @@ -23,8 +57,8 @@ export interface GrokBaseOptions { * Based on OpenAI-compatible API options */ export interface GrokTextProviderOptions extends GrokBaseOptions {} -type ReasoningEffort = 'low' | 'high' -type ReasoningSummary = 'auto' | 'detailed' +type ReasoningEffort = 'low' | 'medium' | 'high' +type ReasoningSummary = 'auto' | 'detailed' | 'concise' /** * Reasoning options for most models (excludes 'concise' summary). 
From 2896bab1e8b49c4e998854eec0af6d4cab4bd60e Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 23 Dec 2025 18:23:25 +0100 Subject: [PATCH 6/9] fix grok model issues --- packages/typescript/ai-grok/src/text/text-provider-options.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/typescript/ai-grok/src/text/text-provider-options.ts b/packages/typescript/ai-grok/src/text/text-provider-options.ts index abfd837a..de9f87bd 100644 --- a/packages/typescript/ai-grok/src/text/text-provider-options.ts +++ b/packages/typescript/ai-grok/src/text/text-provider-options.ts @@ -36,7 +36,7 @@ reasoning.encrypted_content | 'none' | 'auto' | 'required' - | { type: 'function'; function: { [key: string]: string } } + | { type: 'function'; function: { name: string } } /** * The ID of the previous response from the model. */ From 584abb6a71aa4accfd8460a15d048c8c20c5088f Mon Sep 17 00:00:00 2001 From: Jack Herrington Date: Tue, 23 Dec 2025 10:51:32 -0800 Subject: [PATCH 7/9] thin shim approach --- packages/typescript/ai-grok/package.json | 2 +- .../typescript/ai-grok/src/adapters/image.ts | 156 ++---- .../ai-grok/src/adapters/summarize.ts | 160 ++---- .../typescript/ai-grok/src/adapters/text.ts | 496 ++---------------- .../src/image/image-provider-options.ts | 118 ----- packages/typescript/ai-grok/src/index.ts | 42 +- .../typescript/ai-grok/src/message-types.ts | 67 --- packages/typescript/ai-grok/src/model-meta.ts | 22 - .../ai-grok/src/text/text-provider-options.ts | 77 --- .../ai-grok/src/tools/function-tool.ts | 45 -- .../typescript/ai-grok/src/tools/index.ts | 5 - .../ai-grok/src/tools/tool-converter.ts | 17 - .../typescript/ai-grok/src/utils/client.ts | 45 -- .../typescript/ai-grok/src/utils/index.ts | 10 - .../ai-grok/src/utils/schema-converter.ts | 110 ---- .../ai-grok/tests/grok-adapter.test.ts | 301 ++++------- pnpm-lock.yaml | 11 +- 17 files changed, 243 insertions(+), 1441 deletions(-) delete mode 100644 
packages/typescript/ai-grok/src/image/image-provider-options.ts delete mode 100644 packages/typescript/ai-grok/src/message-types.ts delete mode 100644 packages/typescript/ai-grok/src/text/text-provider-options.ts delete mode 100644 packages/typescript/ai-grok/src/tools/function-tool.ts delete mode 100644 packages/typescript/ai-grok/src/tools/index.ts delete mode 100644 packages/typescript/ai-grok/src/tools/tool-converter.ts delete mode 100644 packages/typescript/ai-grok/src/utils/client.ts delete mode 100644 packages/typescript/ai-grok/src/utils/index.ts delete mode 100644 packages/typescript/ai-grok/src/utils/schema-converter.ts diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json index d59054cb..c3b6184a 100644 --- a/packages/typescript/ai-grok/package.json +++ b/packages/typescript/ai-grok/package.json @@ -40,7 +40,7 @@ "adapter" ], "dependencies": { - "openai": "^6.9.1" + "@tanstack/ai-openai": "workspace:^" }, "devDependencies": { "@vitest/coverage-v8": "4.0.14", diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts index beb5dd18..704c7130 100644 --- a/packages/typescript/ai-grok/src/adapters/image.ts +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -1,129 +1,33 @@ -import { BaseImageAdapter } from '@tanstack/ai/adapters' -import { createGrokClient, generateId, getGrokApiKeyFromEnv } from '../utils' -import { - validateImageSize, - validateNumberOfImages, - validatePrompt, -} from '../image/image-provider-options' +import { createOpenaiImage } from '@tanstack/ai-openai' +import type { OpenAIImageConfig } from '@tanstack/ai-openai' import type { GROK_IMAGE_MODELS } from '../model-meta' -import type { - GrokImageModelProviderOptionsByName, - GrokImageModelSizeByName, - GrokImageProviderOptions, -} from '../image/image-provider-options' -import type { - GeneratedImage, - ImageGenerationOptions, - ImageGenerationResult, -} from '@tanstack/ai' -import 
type OpenAI_SDK from 'openai' -import type { GrokClientConfig } from '../utils' + +const GROK_BASE_URL = 'https://api.x.ai/v1' /** * Configuration for Grok image adapter */ -export interface GrokImageConfig extends GrokClientConfig {} +export interface GrokImageConfig extends Omit { + apiKey?: string + baseURL?: string +} /** Model type for Grok Image */ export type GrokImageModel = (typeof GROK_IMAGE_MODELS)[number] /** - * Grok Image Generation Adapter - * - * Tree-shakeable adapter for Grok image generation functionality. - * Supports grok-2-image-1212 model. - * - * Features: - * - Model-specific type-safe provider options - * - Size validation per model - * - Number of images validation + * Alias for ImageProviderOptions for external use */ -export class GrokImageAdapter< - TModel extends GrokImageModel, -> extends BaseImageAdapter< - TModel, - GrokImageProviderOptions, - GrokImageModelProviderOptionsByName, - GrokImageModelSizeByName -> { - readonly kind = 'image' as const - readonly name = 'grok' as const - - private client: OpenAI_SDK - - constructor(config: GrokImageConfig, model: TModel) { - super({}, model) - this.client = createGrokClient(config) - } - - async generateImages( - options: ImageGenerationOptions, - ): Promise { - const { model, prompt, numberOfImages, size } = options - - // Validate inputs - validatePrompt({ prompt, model }) - validateImageSize(model, size) - validateNumberOfImages(model, numberOfImages) - - // Build request based on model type - const request = this.buildRequest(options) - - const response = await this.client.images.generate({ - ...request, - stream: false, - }) - - return this.transformResponse(model, response) - } - - private buildRequest( - options: ImageGenerationOptions, - ): OpenAI_SDK.Images.ImageGenerateParams { - const { model, prompt, numberOfImages, size, modelOptions } = options - - return { - model, - prompt, - n: numberOfImages ?? 
1, - size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], - ...modelOptions, - } - } - - private transformResponse( - model: string, - response: OpenAI_SDK.Images.ImagesResponse, - ): ImageGenerationResult { - const images: Array = (response.data ?? []).map((item) => ({ - b64Json: item.b64_json, - url: item.url, - revisedPrompt: item.revised_prompt, - })) - - return { - id: generateId(this.name), - model, - images, - usage: response.usage - ? { - inputTokens: response.usage.input_tokens, - outputTokens: response.usage.output_tokens, - totalTokens: response.usage.total_tokens, - } - : undefined, - } - } -} +export type { OpenAIImageProviderOptions as GrokImageProviderOptions } from '@tanstack/ai-openai' /** * Creates a Grok image adapter with explicit API key. - * Type resolution happens here at the call site. + * This is a thin wrapper around OpenAI's adapter with Grok's base URL. * * @param model - The model name (e.g., 'grok-2-image-1212') * @param apiKey - Your xAI API key * @param config - Optional additional configuration - * @returns Configured Grok image adapter instance with resolved types + * @returns Configured Grok image adapter instance * * @example * ```typescript @@ -139,13 +43,17 @@ export function createGrokImage( model: TModel, apiKey: string, config?: Omit, -): GrokImageAdapter { - return new GrokImageAdapter({ apiKey, ...config }, model) +) { + // Use 'as any' for model since Grok models aren't in OpenAI's type list + // but the OpenAI-compatible API accepts any model string + return createOpenaiImage(model as any, apiKey, { + ...config, + baseURL: config?.baseURL ?? GROK_BASE_URL, + }) } /** * Creates a Grok image adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. 
* * Looks for `XAI_API_KEY` in: * - `process.env` (Node.js) @@ -153,7 +61,7 @@ export function createGrokImage( * * @param model - The model name (e.g., 'grok-2-image-1212') * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured Grok image adapter instance with resolved types + * @returns Configured Grok image adapter instance * @throws Error if XAI_API_KEY is not found in environment * * @example @@ -170,7 +78,29 @@ export function createGrokImage( export function grokImage( model: TModel, config?: Omit, -): GrokImageAdapter { +) { const apiKey = getGrokApiKeyFromEnv() return createGrokImage(model, apiKey, config) } + +/** + * Gets Grok API key from environment variables + * @throws Error if XAI_API_KEY is not found + */ +function getGrokApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.XAI_API_KEY + + if (!key) { + throw new Error( + 'XAI_API_KEY is required. 
Please set it in your environment variables or use createGrokImage with an explicit API key.', + ) + } + + return key +} diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts index 5cd273f0..e13ebf28 100644 --- a/packages/typescript/ai-grok/src/adapters/summarize.ts +++ b/packages/typescript/ai-grok/src/adapters/summarize.ts @@ -1,132 +1,36 @@ -import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' -import { getGrokApiKeyFromEnv } from '../utils' -import { GrokTextAdapter } from './text' -import type { GROK_CHAT_MODELS } from '../model-meta' +import { createOpenaiSummarize } from '@tanstack/ai-openai' import type { - StreamChunk, - SummarizationOptions, - SummarizationResult, -} from '@tanstack/ai' -import type { GrokClientConfig } from '../utils' + OpenAISummarizeConfig, + OpenAISummarizeProviderOptions, +} from '@tanstack/ai-openai' +import type { GROK_CHAT_MODELS } from '../model-meta' + +const GROK_BASE_URL = 'https://api.x.ai/v1' /** * Configuration for Grok summarize adapter */ -export interface GrokSummarizeConfig extends GrokClientConfig {} +export interface GrokSummarizeConfig extends Omit { + apiKey?: string + baseURL?: string +} /** * Grok-specific provider options for summarization */ -export interface GrokSummarizeProviderOptions { - /** Temperature for response generation (0-2) */ - temperature?: number - /** Maximum tokens in the response */ - maxTokens?: number -} +export type GrokSummarizeProviderOptions = OpenAISummarizeProviderOptions /** Model type for Grok summarization */ export type GrokSummarizeModel = (typeof GROK_CHAT_MODELS)[number] -/** - * Grok Summarize Adapter - * - * A thin wrapper around the text adapter that adds summarization-specific prompting. - * Delegates all API calls to the GrokTextAdapter. 
- */ -export class GrokSummarizeAdapter< - TModel extends GrokSummarizeModel, -> extends BaseSummarizeAdapter { - readonly kind = 'summarize' as const - readonly name = 'grok' as const - - private textAdapter: GrokTextAdapter - - constructor(config: GrokSummarizeConfig, model: TModel) { - super({}, model) - this.textAdapter = new GrokTextAdapter(config, model) - } - - async summarize(options: SummarizationOptions): Promise { - const systemPrompt = this.buildSummarizationPrompt(options) - - // Use the text adapter's streaming and collect the result - let summary = '' - let id = '' - let model = options.model - let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } - - for await (const chunk of this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - })) { - if (chunk.type === 'content') { - summary = chunk.content - id = chunk.id - model = chunk.model - } - if (chunk.type === 'done' && chunk.usage) { - usage = chunk.usage - } - } - - return { id, model, summary, usage } - } - - async *summarizeStream( - options: SummarizationOptions, - ): AsyncIterable { - const systemPrompt = this.buildSummarizationPrompt(options) - - // Delegate directly to the text adapter's streaming - yield* this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - }) - } - - private buildSummarizationPrompt(options: SummarizationOptions): string { - let prompt = 'You are a professional summarizer. ' - - switch (options.style) { - case 'bullet-points': - prompt += 'Provide a summary in bullet point format. ' - break - case 'paragraph': - prompt += 'Provide a summary in paragraph format. ' - break - case 'concise': - prompt += 'Provide a very concise summary in 1-2 sentences. 
' - break - default: - prompt += 'Provide a clear and concise summary. ' - } - - if (options.focus && options.focus.length > 0) { - prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` - } - - if (options.maxLength) { - prompt += `Keep the summary under ${options.maxLength} tokens. ` - } - - return prompt - } -} - /** * Creates a Grok summarize adapter with explicit API key. - * Type resolution happens here at the call site. + * This is a thin wrapper around OpenAI's adapter with Grok's base URL. * * @param model - The model name (e.g., 'grok-3', 'grok-4') * @param apiKey - Your xAI API key * @param config - Optional additional configuration - * @returns Configured Grok summarize adapter instance with resolved types + * @returns Configured Grok summarize adapter instance * * @example * ```typescript @@ -137,13 +41,17 @@ export function createGrokSummarize( model: TModel, apiKey: string, config?: Omit, -): GrokSummarizeAdapter { - return new GrokSummarizeAdapter({ apiKey, ...config }, model) +) { + // Use 'as any' for model since Grok models aren't in OpenAI's type list + // but the OpenAI-compatible API accepts any model string + return createOpenaiSummarize(model as any, apiKey, { + ...config, + baseURL: config?.baseURL ?? GROK_BASE_URL, + }) } /** * Creates a Grok summarize adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. 
* * Looks for `XAI_API_KEY` in: * - `process.env` (Node.js) @@ -151,7 +59,7 @@ export function createGrokSummarize( * * @param model - The model name (e.g., 'grok-3', 'grok-4') * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured Grok summarize adapter instance with resolved types + * @returns Configured Grok summarize adapter instance * @throws Error if XAI_API_KEY is not found in environment * * @example @@ -168,7 +76,29 @@ export function createGrokSummarize( export function grokSummarize( model: TModel, config?: Omit, -): GrokSummarizeAdapter { +) { const apiKey = getGrokApiKeyFromEnv() return createGrokSummarize(model, apiKey, config) } + +/** + * Gets Grok API key from environment variables + * @throws Error if XAI_API_KEY is not found + */ +function getGrokApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.XAI_API_KEY + + if (!key) { + throw new Error( + 'XAI_API_KEY is required. 
Please set it in your environment variables or use createGrokSummarize with an explicit API key.', + ) + } + + return key +} diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index bef2ffaf..7e8d301e 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -1,481 +1,49 @@ -import { BaseTextAdapter } from '@tanstack/ai/adapters' -import { validateTextProviderOptions } from '../text/text-provider-options' -import { convertToolsToProviderFormat } from '../tools' -import { - createGrokClient, - generateId, - getGrokApiKeyFromEnv, - makeGrokStructuredOutputCompatible, - transformNullsToUndefined, -} from '../utils' -import type { - GROK_CHAT_MODELS, - ResolveInputModalities, - ResolveProviderOptions, -} from '../model-meta' -import type { - StructuredOutputOptions, - StructuredOutputResult, -} from '@tanstack/ai/adapters' -import type OpenAI_SDK from 'openai' -import type { - ContentPart, - ModelMessage, - StreamChunk, - TextOptions, -} from '@tanstack/ai' -import type { InternalTextProviderOptions } from '../text/text-provider-options' -import type { - GrokImageMetadata, - GrokMessageMetadataByModality, -} from '../message-types' -import type { GrokClientConfig } from '../utils' +import { createOpenaiChat } from '@tanstack/ai-openai' +import type { OpenAITextConfig } from '@tanstack/ai-openai' +import type { GROK_CHAT_MODELS } from '../model-meta' + +const GROK_BASE_URL = 'https://api.x.ai/v1' /** * Configuration for Grok text adapter */ -export interface GrokTextConfig extends GrokClientConfig {} +export interface GrokTextConfig extends Omit { + apiKey?: string + baseURL?: string +} /** * Alias for TextProviderOptions for external use */ -export type { ExternalTextProviderOptions as GrokTextProviderOptions } from '../text/text-provider-options' - -/** - * Grok Text (Chat) Adapter - * - * Tree-shakeable adapter for Grok chat/text completion 
functionality. - * Uses OpenAI-compatible Chat Completions API (not Responses API). - */ -export class GrokTextAdapter< - TModel extends (typeof GROK_CHAT_MODELS)[number], -> extends BaseTextAdapter< - TModel, - ResolveProviderOptions, - ResolveInputModalities, - GrokMessageMetadataByModality -> { - readonly kind = 'text' as const - readonly name = 'grok' as const - - private client: OpenAI_SDK - - constructor(config: GrokTextConfig, model: TModel) { - super({}, model) - this.client = createGrokClient(config) - } - - async *chatStream( - options: TextOptions>, - ): AsyncIterable { - const requestParams = this.mapTextOptionsToGrok(options) - - try { - const stream = await this.client.chat.completions.create({ - ...requestParams, - stream: true, - }) - - yield* this.processGrokStreamChunks(stream, options) - } catch (error: unknown) { - const err = error as Error - console.error('>>> chatStream: Fatal error during response creation <<<') - console.error('>>> Error message:', err.message) - console.error('>>> Error stack:', err.stack) - console.error('>>> Full error:', err) - throw error - } - } - - /** - * Generate structured output using Grok's JSON Schema response format. - * Uses stream: false to get the complete response in one call. - * - * Grok has strict requirements for structured output (via OpenAI-compatible API): - * - All properties must be in the `required` array - * - Optional fields should have null added to their type union - * - additionalProperties must be false for all objects - * - * The outputSchema is already JSON Schema (converted in the ai layer). - * We apply Grok-specific transformations for structured output compatibility. 
- */ - async structuredOutput( - options: StructuredOutputOptions>, - ): Promise> { - const { chatOptions, outputSchema } = options - const requestParams = this.mapTextOptionsToGrok(chatOptions) - - // Apply Grok-specific transformations for structured output compatibility - const jsonSchema = makeGrokStructuredOutputCompatible( - outputSchema, - outputSchema.required || [], - ) - - try { - const response = await this.client.chat.completions.create({ - ...requestParams, - stream: false, - response_format: { - type: 'json_schema', - json_schema: { - name: 'structured_output', - schema: jsonSchema, - strict: true, - }, - }, - }) - - // Extract text content from the response - const rawText = response.choices[0]?.message.content || '' - - // Parse the JSON response - let parsed: unknown - try { - parsed = JSON.parse(rawText) - } catch { - throw new Error( - `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, - ) - } - - // Transform null values to undefined to match original Zod schema expectations - // Grok returns null for optional fields we made nullable in the schema - const transformed = transformNullsToUndefined(parsed) - - return { - data: transformed, - rawText, - } - } catch (error: unknown) { - const err = error as Error - console.error('>>> structuredOutput: Error during response creation <<<') - console.error('>>> Error message:', err.message) - throw error - } - } - - private async *processGrokStreamChunks( - stream: AsyncIterable, - options: TextOptions, - ): AsyncIterable { - let accumulatedContent = '' - const timestamp = Date.now() - let responseId = generateId(this.name) - - // Track tool calls being streamed (arguments come in chunks) - const toolCallsInProgress = new Map< - number, - { - id: string - name: string - arguments: string - } - >() - - try { - for await (const chunk of stream) { - responseId = chunk.id || responseId - const choice = chunk.choices[0] - - if (!choice) continue 
- - const delta = choice.delta - const deltaContent = delta.content - const deltaToolCalls = delta.tool_calls - - // Handle content delta - if (deltaContent) { - accumulatedContent += deltaContent - yield { - type: 'content', - id: responseId, - model: chunk.model || options.model, - timestamp, - delta: deltaContent, - content: accumulatedContent, - role: 'assistant', - } - } - - // Handle tool calls - they come in as deltas - if (deltaToolCalls) { - for (const toolCallDelta of deltaToolCalls) { - const index = toolCallDelta.index - - // Initialize or update the tool call in progress - if (!toolCallsInProgress.has(index)) { - toolCallsInProgress.set(index, { - id: toolCallDelta.id || '', - name: toolCallDelta.function?.name || '', - arguments: '', - }) - } - - const toolCall = toolCallsInProgress.get(index)! - - // Update with any new data from the delta - if (toolCallDelta.id) { - toolCall.id = toolCallDelta.id - } - if (toolCallDelta.function?.name) { - toolCall.name = toolCallDelta.function.name - } - if (toolCallDelta.function?.arguments) { - toolCall.arguments += toolCallDelta.function.arguments - } - } - } - - // Handle finish reason - if (choice.finish_reason) { - // Emit all completed tool calls - if ( - choice.finish_reason === 'tool_calls' || - toolCallsInProgress.size > 0 - ) { - for (const [index, toolCall] of toolCallsInProgress) { - yield { - type: 'tool_call', - id: responseId, - model: chunk.model || options.model, - timestamp, - index, - toolCall: { - id: toolCall.id, - type: 'function', - function: { - name: toolCall.name, - arguments: toolCall.arguments, - }, - }, - } - } - } - - yield { - type: 'done', - id: responseId, - model: chunk.model || options.model, - timestamp, - usage: chunk.usage - ? 
{ - promptTokens: chunk.usage.prompt_tokens || 0, - completionTokens: chunk.usage.completion_tokens || 0, - totalTokens: chunk.usage.total_tokens || 0, - } - : undefined, - finishReason: - choice.finish_reason === 'tool_calls' || - toolCallsInProgress.size > 0 - ? 'tool_calls' - : 'stop', - } - } - } - } catch (error: unknown) { - const err = error as Error & { code?: string } - console.log('[Grok Adapter] Stream ended with error:', err.message) - yield { - type: 'error', - id: responseId, - model: options.model, - timestamp, - error: { - message: err.message || 'Unknown error occurred', - code: err.code, - }, - } - } - } - - /** - * Maps common options to Grok-specific Chat Completions format - */ - private mapTextOptionsToGrok( - options: TextOptions, - ): OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming { - const modelOptions = options.modelOptions as - | Omit< - InternalTextProviderOptions, - 'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p' - > - | undefined - - if (modelOptions) { - validateTextProviderOptions({ - ...modelOptions, - model: options.model, - }) - } - - const tools = options.tools - ? 
convertToolsToProviderFormat(options.tools) - : undefined - - // Build messages array with system prompts - const messages: Array = - [] - - // Add system prompts first - if (options.systemPrompts && options.systemPrompts.length > 0) { - messages.push({ - role: 'system', - content: options.systemPrompts.join('\n'), - }) - } - - // Convert messages - for (const message of options.messages) { - messages.push(this.convertMessageToGrok(message)) - } - - return { - model: options.model, - messages, - temperature: options.temperature, - max_tokens: options.maxTokens, - top_p: options.topP, - tools: tools as Array, - stream: true, - stream_options: { include_usage: true }, - } - } - - private convertMessageToGrok( - message: ModelMessage, - ): OpenAI_SDK.Chat.Completions.ChatCompletionMessageParam { - // Handle tool messages - if (message.role === 'tool') { - return { - role: 'tool', - tool_call_id: message.toolCallId || '', - content: - typeof message.content === 'string' - ? message.content - : JSON.stringify(message.content), - } - } - - // Handle assistant messages - if (message.role === 'assistant') { - const toolCalls = message.toolCalls?.map((tc) => ({ - id: tc.id, - type: 'function' as const, - function: { - name: tc.function.name, - arguments: - typeof tc.function.arguments === 'string' - ? tc.function.arguments - : JSON.stringify(tc.function.arguments), - }, - })) - - return { - role: 'assistant', - content: this.extractTextContent(message.content), - ...(toolCalls && toolCalls.length > 0 ? 
{ tool_calls: toolCalls } : {}), - } - } - - // Handle user messages - support multimodal content - const contentParts = this.normalizeContent(message.content) - - // If only text, use simple string format - if (contentParts.length === 1 && contentParts[0]?.type === 'text') { - return { - role: 'user', - content: contentParts[0].content, - } - } - - // Otherwise, use array format for multimodal - const parts: Array = - [] - for (const part of contentParts) { - if (part.type === 'text') { - parts.push({ type: 'text', text: part.content }) - } else if (part.type === 'image') { - const imageMetadata = part.metadata as GrokImageMetadata | undefined - parts.push({ - type: 'image_url', - image_url: { - url: part.source.value, - detail: imageMetadata?.detail || 'auto', - }, - }) - } - } - - return { - role: 'user', - content: parts.length > 0 ? parts : '', - } - } - - /** - * Normalizes message content to an array of ContentPart. - * Handles backward compatibility with string content. - */ - private normalizeContent( - content: string | null | Array, - ): Array { - if (content === null) { - return [] - } - if (typeof content === 'string') { - return [{ type: 'text', content: content }] - } - return content - } - - /** - * Extracts text content from a content value that may be string, null, or ContentPart array. - */ - private extractTextContent( - content: string | null | Array, - ): string { - if (content === null) { - return '' - } - if (typeof content === 'string') { - return content - } - // It's an array of ContentPart - return content - .filter((p) => p.type === 'text') - .map((p) => p.content) - .join('') - } -} +export type { OpenAITextProviderOptions as GrokTextProviderOptions } from '@tanstack/ai-openai' /** * Creates a Grok text adapter with explicit API key. - * Type resolution happens here at the call site. + * This is a thin wrapper around OpenAI's adapter with Grok's base URL. 
* * @param model - The model name (e.g., 'grok-3', 'grok-4') * @param apiKey - Your xAI API key * @param config - Optional additional configuration - * @returns Configured Grok text adapter instance with resolved types + * @returns Configured Grok text adapter instance * * @example * ```typescript * const adapter = createGrokText('grok-3', "xai-..."); - * // adapter has type-safe providerOptions for grok-3 * ``` */ export function createGrokText< TModel extends (typeof GROK_CHAT_MODELS)[number], ->( - model: TModel, - apiKey: string, - config?: Omit, -): GrokTextAdapter { - return new GrokTextAdapter({ apiKey, ...config }, model) +>(model: TModel, apiKey: string, config?: Omit) { + // Use 'as any' for model since Grok models aren't in OpenAI's type list + // but the OpenAI-compatible API accepts any model string + return createOpenaiChat(model as any, apiKey, { + ...config, + baseURL: config?.baseURL ?? GROK_BASE_URL, + }) } /** * Creates a Grok text adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. * * Looks for `XAI_API_KEY` in: * - `process.env` (Node.js) @@ -483,7 +51,7 @@ export function createGrokText< * * @param model - The model name (e.g., 'grok-3', 'grok-4') * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured Grok text adapter instance with resolved types + * @returns Configured Grok text adapter instance * @throws Error if XAI_API_KEY is not found in environment * * @example @@ -500,7 +68,29 @@ export function createGrokText< export function grokText( model: TModel, config?: Omit, -): GrokTextAdapter { +) { const apiKey = getGrokApiKeyFromEnv() return createGrokText(model, apiKey, config) } + +/** + * Gets Grok API key from environment variables + * @throws Error if XAI_API_KEY is not found + */ +function getGrokApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? 
(globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.XAI_API_KEY + + if (!key) { + throw new Error( + 'XAI_API_KEY is required. Please set it in your environment variables or use createGrokText with an explicit API key.', + ) + } + + return key +} diff --git a/packages/typescript/ai-grok/src/image/image-provider-options.ts b/packages/typescript/ai-grok/src/image/image-provider-options.ts deleted file mode 100644 index 9b0d9ee5..00000000 --- a/packages/typescript/ai-grok/src/image/image-provider-options.ts +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Grok Image Generation Provider Options - * - * These are provider-specific options for Grok image generation. - * Grok uses the grok-2-image-1212 model for image generation. - */ - -/** - * Supported sizes for grok-2-image-1212 model - */ -export type GrokImageSize = '1024x1024' | '1536x1024' | '1024x1536' - -/** - * Base provider options for Grok image models - */ -export interface GrokImageBaseProviderOptions { - /** - * A unique identifier representing your end-user. - * Can help xAI to monitor and detect abuse. - */ - user?: string -} - -/** - * Provider options for grok-2-image-1212 model - */ -export interface GrokImageProviderOptions extends GrokImageBaseProviderOptions { - /** - * The quality of the image. - * @default 'standard' - */ - quality?: 'standard' | 'hd' - - /** - * The format in which generated images are returned. - * URLs are only valid for 60 minutes after generation. - * @default 'url' - */ - response_format?: 'url' | 'b64_json' -} - -/** - * Type-only map from model name to its specific provider options. - */ -export type GrokImageModelProviderOptionsByName = { - 'grok-2-image-1212': GrokImageProviderOptions -} - -/** - * Type-only map from model name to its supported sizes. 
- */ -export type GrokImageModelSizeByName = { - 'grok-2-image-1212': GrokImageSize -} - -/** - * Internal options interface for validation - */ -interface ImageValidationOptions { - prompt: string - model: string -} - -/** - * Validates that the provided size is supported by the model. - * Throws a descriptive error if the size is not supported. - */ -export function validateImageSize( - model: string, - size: string | undefined, -): void { - if (!size) return - - const validSizes: Record> = { - 'grok-2-image-1212': ['1024x1024', '1536x1024', '1024x1536'], - } - - const modelSizes = validSizes[model] - if (!modelSizes) { - throw new Error(`Unknown image model: ${model}`) - } - - if (!modelSizes.includes(size)) { - throw new Error( - `Size "${size}" is not supported by model "${model}". ` + - `Supported sizes: ${modelSizes.join(', ')}`, - ) - } -} - -/** - * Validates that the number of images is within bounds for the model. - */ -export function validateNumberOfImages( - _model: string, - numberOfImages: number | undefined, -): void { - if (numberOfImages === undefined) return - - // grok-2-image-1212 supports 1-10 images per request - if (numberOfImages < 1 || numberOfImages > 10) { - throw new Error( - `Number of images must be between 1 and 10. 
Requested: ${numberOfImages}`, - ) - } -} - -export const validatePrompt = (options: ImageValidationOptions) => { - if (options.prompt.length === 0) { - throw new Error('Prompt cannot be empty.') - } - // Grok image model supports up to 4000 characters - if (options.prompt.length > 4000) { - throw new Error( - 'For grok-2-image-1212, prompt length must be less than or equal to 4000 characters.', - ) - } -} diff --git a/packages/typescript/ai-grok/src/index.ts b/packages/typescript/ai-grok/src/index.ts index 1002a5dc..ca8f4f94 100644 --- a/packages/typescript/ai-grok/src/index.ts +++ b/packages/typescript/ai-grok/src/index.ts @@ -1,19 +1,17 @@ // ============================================================================ -// New Tree-Shakeable Adapters (Recommended) +// Thin Wrappers Around OpenAI Adapters (configured for Grok) // ============================================================================ -// Text (Chat) adapter - for chat/text completion +// Text (Chat) adapter - thin wrapper around OpenAI's adapter with Grok base URL export { - GrokTextAdapter, createGrokText, grokText, type GrokTextConfig, type GrokTextProviderOptions, } from './adapters/text' -// Summarize adapter - for text summarization +// Summarize adapter - thin wrapper around OpenAI's adapter with Grok base URL export { - GrokSummarizeAdapter, createGrokSummarize, grokSummarize, type GrokSummarizeConfig, @@ -21,35 +19,35 @@ export { type GrokSummarizeModel, } from './adapters/summarize' -// Image adapter - for image generation +// Image adapter - thin wrapper around OpenAI's adapter with Grok base URL export { - GrokImageAdapter, createGrokImage, grokImage, type GrokImageConfig, type GrokImageModel, + type GrokImageProviderOptions, } from './adapters/image' -export type { - GrokImageProviderOptions, - GrokImageModelProviderOptionsByName, -} from './image/image-provider-options' // ============================================================================ -// Type Exports +// 
Grok-specific Model Metadata // ============================================================================ +export { GROK_CHAT_MODELS, GROK_IMAGE_MODELS } from './model-meta' export type { GrokChatModelProviderOptionsByName, GrokModelInputModalitiesByName, - ResolveProviderOptions, - ResolveInputModalities, + GrokProviderOptions, } from './model-meta' -export { GROK_CHAT_MODELS, GROK_IMAGE_MODELS } from './model-meta' + +// ============================================================================ +// Re-exported Types from OpenAI (for convenience) +// ============================================================================ + export type { - GrokTextMetadata, - GrokImageMetadata, - GrokAudioMetadata, - GrokVideoMetadata, - GrokDocumentMetadata, - GrokMessageMetadataByModality, -} from './message-types' + OpenAITextMetadata as GrokTextMetadata, + OpenAIImageMetadata as GrokImageMetadata, + OpenAIAudioMetadata as GrokAudioMetadata, + OpenAIVideoMetadata as GrokVideoMetadata, + OpenAIDocumentMetadata as GrokDocumentMetadata, + OpenAIMessageMetadataByModality as GrokMessageMetadataByModality, +} from '@tanstack/ai-openai' diff --git a/packages/typescript/ai-grok/src/message-types.ts b/packages/typescript/ai-grok/src/message-types.ts deleted file mode 100644 index ec9e7b18..00000000 --- a/packages/typescript/ai-grok/src/message-types.ts +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Grok-specific metadata types for multimodal content parts. - * These types extend the base ContentPart metadata with Grok-specific options. - * - * Grok uses an OpenAI-compatible API, so metadata types are similar to OpenAI. - * - * @see https://docs.x.ai - */ - -/** - * Metadata for Grok image content parts. - * Controls how the model processes and analyzes images. - */ -export interface GrokImageMetadata { - /** - * Controls how the model processes the image. 
- * - 'auto': Let the model decide based on image size and content - * - 'low': Use low resolution processing (faster, cheaper, less detail) - * - 'high': Use high resolution processing (slower, more expensive, more detail) - * - * @default 'auto' - */ - detail?: 'auto' | 'low' | 'high' -} - -/** - * Metadata for Grok audio content parts. - * Specifies the audio format for proper processing. - */ -export interface GrokAudioMetadata { - /** - * The format of the audio. - * Supported formats: mp3, wav, flac, etc. - * @default 'mp3' - */ - format?: 'mp3' | 'wav' | 'flac' | 'ogg' | 'webm' | 'aac' -} - -/** - * Metadata for Grok video content parts. - * Note: Video support in Grok is limited; check current API capabilities. - */ -export interface GrokVideoMetadata {} - -/** - * Metadata for Grok document content parts. - * Note: Direct document support may vary; PDFs often need to be converted to images. - */ -export interface GrokDocumentMetadata {} - -/** - * Metadata for Grok text content parts. - * Currently no specific metadata options for text in Grok. - */ -export interface GrokTextMetadata {} - -/** - * Map of modality types to their Grok-specific metadata types. - * Used for type inference when constructing multimodal messages. - */ -export interface GrokMessageMetadataByModality { - text: GrokTextMetadata - image: GrokImageMetadata - audio: GrokAudioMetadata - video: GrokVideoMetadata - document: GrokDocumentMetadata -} diff --git a/packages/typescript/ai-grok/src/model-meta.ts b/packages/typescript/ai-grok/src/model-meta.ts index fc89a098..938fa978 100644 --- a/packages/typescript/ai-grok/src/model-meta.ts +++ b/packages/typescript/ai-grok/src/model-meta.ts @@ -60,25 +60,3 @@ export interface GrokProviderOptions { /** A unique identifier representing your end-user */ user?: string } - -// =========================== -// Type Resolution Helpers -// =========================== - -/** - * Resolve provider options for a specific model. 
- * If the model has explicit options in the map, use those; otherwise use base options. - */ -export type ResolveProviderOptions = - TModel extends keyof GrokChatModelProviderOptionsByName - ? GrokChatModelProviderOptionsByName[TModel] - : GrokProviderOptions - -/** - * Resolve input modalities for a specific model. - * If the model has explicit modalities in the map, use those; otherwise use text only. - */ -export type ResolveInputModalities = - TModel extends keyof GrokModelInputModalitiesByName - ? GrokModelInputModalitiesByName[TModel] - : readonly ['text'] diff --git a/packages/typescript/ai-grok/src/text/text-provider-options.ts b/packages/typescript/ai-grok/src/text/text-provider-options.ts deleted file mode 100644 index a05222ff..00000000 --- a/packages/typescript/ai-grok/src/text/text-provider-options.ts +++ /dev/null @@ -1,77 +0,0 @@ -import type { FunctionTool } from '../tools/function-tool' - -/** - * Grok Text Provider Options - * - * Grok uses an OpenAI-compatible Chat Completions API. - * However, not all OpenAI features may be supported by Grok. - */ - -/** - * Base provider options for Grok text/chat models - */ -export interface GrokBaseOptions { - /** - * A unique identifier representing your end-user. - * Can help xAI to monitor and detect abuse. 
- */ - user?: string -} - -/** - * Grok-specific provider options for text/chat - * Based on OpenAI-compatible API options - */ -export interface GrokTextProviderOptions extends GrokBaseOptions { - /** - * Temperature for response generation (0-2) - * Higher values make output more random, lower values more focused - */ - temperature?: number - /** - * Top-p sampling parameter (0-1) - * Alternative to temperature, nucleus sampling - */ - top_p?: number - /** - * Maximum tokens in the response - */ - max_tokens?: number - /** - * Frequency penalty (-2.0 to 2.0) - */ - frequency_penalty?: number - /** - * Presence penalty (-2.0 to 2.0) - */ - presence_penalty?: number - /** - * Stop sequences - */ - stop?: string | Array -} - -/** - * Internal options interface for validation - * Used internally by the adapter - */ -export interface InternalTextProviderOptions extends GrokTextProviderOptions { - model: string - stream?: boolean - tools?: Array -} - -/** - * External provider options (what users pass in) - */ -export type ExternalTextProviderOptions = GrokTextProviderOptions - -/** - * Validates text provider options - */ -export function validateTextProviderOptions( - _options: InternalTextProviderOptions, -): void { - // Basic validation can be added here if needed - // For now, Grok API will handle validation -} diff --git a/packages/typescript/ai-grok/src/tools/function-tool.ts b/packages/typescript/ai-grok/src/tools/function-tool.ts deleted file mode 100644 index 646fb895..00000000 --- a/packages/typescript/ai-grok/src/tools/function-tool.ts +++ /dev/null @@ -1,45 +0,0 @@ -import { makeGrokStructuredOutputCompatible } from '../utils/schema-converter' -import type { JSONSchema, Tool } from '@tanstack/ai' -import type OpenAI from 'openai' - -// Use Chat Completions API tool format (not Responses API) -export type FunctionTool = OpenAI.Chat.Completions.ChatCompletionTool - -/** - * Converts a standard Tool to Grok ChatCompletionTool format. 
- * - * Tool schemas are already converted to JSON Schema in the ai layer. - * We apply Grok-specific transformations for strict mode: - * - All properties in required array - * - Optional fields made nullable - * - additionalProperties: false - * - * This enables strict mode for all tools automatically. - */ -export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { - // Tool schemas are already converted to JSON Schema in the ai layer - // Apply Grok-specific transformations for strict mode - const inputSchema = (tool.inputSchema ?? { - type: 'object', - properties: {}, - required: [], - }) as JSONSchema - - const jsonSchema = makeGrokStructuredOutputCompatible( - inputSchema, - inputSchema.required || [], - ) - - // Ensure additionalProperties is false for strict mode - jsonSchema.additionalProperties = false - - return { - type: 'function', - function: { - name: tool.name, - description: tool.description, - parameters: jsonSchema, - strict: true, // Always use strict mode since our schema converter handles the requirements - }, - } satisfies FunctionTool -} diff --git a/packages/typescript/ai-grok/src/tools/index.ts b/packages/typescript/ai-grok/src/tools/index.ts deleted file mode 100644 index c9033415..00000000 --- a/packages/typescript/ai-grok/src/tools/index.ts +++ /dev/null @@ -1,5 +0,0 @@ -export { - convertFunctionToolToAdapterFormat, - type FunctionTool, -} from './function-tool' -export { convertToolsToProviderFormat } from './tool-converter' diff --git a/packages/typescript/ai-grok/src/tools/tool-converter.ts b/packages/typescript/ai-grok/src/tools/tool-converter.ts deleted file mode 100644 index 969fdb72..00000000 --- a/packages/typescript/ai-grok/src/tools/tool-converter.ts +++ /dev/null @@ -1,17 +0,0 @@ -import { convertFunctionToolToAdapterFormat } from './function-tool' -import type { FunctionTool } from './function-tool' -import type { Tool } from '@tanstack/ai' - -/** - * Converts an array of standard Tools to Grok-specific 
format - * Grok uses OpenAI-compatible API, so we primarily support function tools - */ -export function convertToolsToProviderFormat( - tools: Array, -): Array { - return tools.map((tool) => { - // For Grok, all tools are converted as function tools - // Grok uses OpenAI-compatible API which primarily supports function tools - return convertFunctionToolToAdapterFormat(tool) - }) -} diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts deleted file mode 100644 index 2a559076..00000000 --- a/packages/typescript/ai-grok/src/utils/client.ts +++ /dev/null @@ -1,45 +0,0 @@ -import OpenAI_SDK from 'openai' - -export interface GrokClientConfig { - apiKey: string - baseURL?: string -} - -/** - * Creates a Grok SDK client instance using OpenAI SDK with xAI's base URL - */ -export function createGrokClient(config: GrokClientConfig): OpenAI_SDK { - return new OpenAI_SDK({ - apiKey: config.apiKey, - baseURL: config.baseURL || 'https://api.x.ai/v1', - }) -} - -/** - * Gets Grok API key from environment variables - * @throws Error if XAI_API_KEY is not found - */ -export function getGrokApiKeyFromEnv(): string { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? process.env - : undefined - const key = env?.XAI_API_KEY - - if (!key) { - throw new Error( - 'XAI_API_KEY is required. 
Please set it in your environment variables or use the factory function with an explicit API key.', - ) - } - - return key -} - -/** - * Generates a unique ID with a prefix - */ -export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` -} diff --git a/packages/typescript/ai-grok/src/utils/index.ts b/packages/typescript/ai-grok/src/utils/index.ts deleted file mode 100644 index 72c2f529..00000000 --- a/packages/typescript/ai-grok/src/utils/index.ts +++ /dev/null @@ -1,10 +0,0 @@ -export { - createGrokClient, - getGrokApiKeyFromEnv, - generateId, - type GrokClientConfig, -} from './client' -export { - makeGrokStructuredOutputCompatible, - transformNullsToUndefined, -} from './schema-converter' diff --git a/packages/typescript/ai-grok/src/utils/schema-converter.ts b/packages/typescript/ai-grok/src/utils/schema-converter.ts deleted file mode 100644 index 38c345e2..00000000 --- a/packages/typescript/ai-grok/src/utils/schema-converter.ts +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Recursively transform null values to undefined in an object. - * - * This is needed because Grok's structured output (via OpenAI-compatible API) requires all fields to be - * in the `required` array, with optional fields made nullable (type: ["string", "null"]). - * When Grok returns null for optional fields, we need to convert them back to - * undefined to match the original Zod schema expectations. 
- * - * @param obj - Object to transform - * @returns Object with nulls converted to undefined - */ -export function transformNullsToUndefined(obj: T): T { - if (obj === null) { - return undefined as unknown as T - } - - if (Array.isArray(obj)) { - return obj.map((item) => transformNullsToUndefined(item)) as unknown as T - } - - if (typeof obj === 'object') { - const result: Record = {} - for (const [key, value] of Object.entries(obj as Record)) { - const transformed = transformNullsToUndefined(value) - // Only include the key if the value is not undefined - // This makes { notes: null } become {} (field absent) instead of { notes: undefined } - if (transformed !== undefined) { - result[key] = transformed - } - } - return result as T - } - - return obj -} - -/** - * Transform a JSON schema to be compatible with Grok's structured output requirements (OpenAI-compatible). - * Grok requires: - * - All properties must be in the `required` array - * - Optional fields should have null added to their type union - * - additionalProperties must be false for objects - * - * @param schema - JSON schema to transform - * @param originalRequired - Original required array (to know which fields were optional) - * @returns Transformed schema compatible with Grok structured output - */ -export function makeGrokStructuredOutputCompatible( - schema: Record, - originalRequired: Array = [], -): Record { - const result = { ...schema } - - // Handle object types - if (result.type === 'object' && result.properties) { - const properties = { ...result.properties } - const allPropertyNames = Object.keys(properties) - - // Transform each property - for (const propName of allPropertyNames) { - const prop = properties[propName] - const wasOptional = !originalRequired.includes(propName) - - // Recursively transform nested objects/arrays - if (prop.type === 'object' && prop.properties) { - properties[propName] = makeGrokStructuredOutputCompatible( - prop, - prop.required || [], - ) - } else if 
(prop.type === 'array' && prop.items) { - properties[propName] = { - ...prop, - items: makeGrokStructuredOutputCompatible( - prop.items, - prop.items.required || [], - ), - } - } else if (wasOptional) { - // Make optional fields nullable by adding null to the type - if (prop.type && !Array.isArray(prop.type)) { - properties[propName] = { - ...prop, - type: [prop.type, 'null'], - } - } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { - properties[propName] = { - ...prop, - type: [...prop.type, 'null'], - } - } - } - } - - result.properties = properties - // ALL properties must be required for Grok structured output - result.required = allPropertyNames - // additionalProperties must be false - result.additionalProperties = false - } - - // Handle array types with object items - if (result.type === 'array' && result.items) { - result.items = makeGrokStructuredOutputCompatible( - result.items, - result.items.required || [], - ) - } - - return result -} diff --git a/packages/typescript/ai-grok/tests/grok-adapter.test.ts b/packages/typescript/ai-grok/tests/grok-adapter.test.ts index 4730a0f1..2bf90e32 100644 --- a/packages/typescript/ai-grok/tests/grok-adapter.test.ts +++ b/packages/typescript/ai-grok/tests/grok-adapter.test.ts @@ -1,222 +1,97 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest' -import { chat, type Tool, type StreamChunk } from '@tanstack/ai' -import { GrokTextAdapter } from '../src/adapters/text' - -const createAdapter = () => - new GrokTextAdapter({ apiKey: 'test-key' }, 'grok-3') - -const toolArguments = JSON.stringify({ location: 'Berlin' }) - -const weatherTool: Tool = { - name: 'lookup_weather', - description: 'Return the forecast for a location', -} - -function createMockChatCompletionsStream( - chunks: Array>, -): AsyncIterable> { - return { - async *[Symbol.asyncIterator]() { - for (const chunk of chunks) { - yield chunk - } - }, - } -} - -describe('Grok adapter option mapping', () => { - beforeEach(() => { - 
vi.clearAllMocks() +import { describe, it, expect, vi, afterEach } from 'vitest' +import { createGrokText, grokText } from '../src/adapters/text' +import { createGrokImage, grokImage } from '../src/adapters/image' +import { createGrokSummarize, grokSummarize } from '../src/adapters/summarize' + +describe('Grok adapters', () => { + afterEach(() => { + vi.unstubAllEnvs() }) - it('maps options into the Chat Completions API payload', async () => { - // Mock the Chat Completions API stream format - const mockStream = createMockChatCompletionsStream([ - { - id: 'chatcmpl-123', - model: 'grok-3', - choices: [ - { - index: 0, - delta: { - role: 'assistant', - content: 'It is sunny', - }, - }, - ], - }, - { - id: 'chatcmpl-123', - model: 'grok-3', - choices: [ - { - index: 0, - delta: {}, - finish_reason: 'stop', - }, - ], - usage: { - prompt_tokens: 12, - completion_tokens: 4, - total_tokens: 16, - }, - }, - ]) - - const chatCompletionsCreate = vi.fn().mockResolvedValueOnce(mockStream) - - const adapter = createAdapter() - // Replace the internal OpenAI SDK client with our mock - ;(adapter as any).client = { - chat: { - completions: { - create: chatCompletionsCreate, - }, - }, - } - - const chunks: StreamChunk[] = [] - for await (const chunk of chat({ - adapter, - messages: [ - { role: 'user', content: 'How is the weather?' 
}, - { - role: 'assistant', - content: 'Let me check', - toolCalls: [ - { - id: 'call_weather', - type: 'function', - function: { name: 'lookup_weather', arguments: toolArguments }, - }, - ], - }, - { role: 'tool', toolCallId: 'call_weather', content: '{"temp":72}' }, - ], - systemPrompts: ['Stay concise'], - tools: [weatherTool], - temperature: 0.25, - topP: 0.6, - maxTokens: 1024, - })) { - chunks.push(chunk) - } - - expect(chatCompletionsCreate).toHaveBeenCalledTimes(1) - const [payload] = chatCompletionsCreate.mock.calls[0] - - // Chat Completions API format - expect(payload).toMatchObject({ - model: 'grok-3', - temperature: 0.25, - top_p: 0.6, - max_tokens: 1024, - stream: true, + describe('Text adapter', () => { + it('creates a text adapter with explicit API key', () => { + const adapter = createGrokText('grok-3', 'test-api-key') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.name).toBe('openai') // Underlying adapter is OpenAI + expect(adapter.model).toBe('grok-3') + }) + + it('creates a text adapter from environment variable', () => { + vi.stubEnv('XAI_API_KEY', 'env-api-key') + + const adapter = grokText('grok-4') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.model).toBe('grok-4') + }) + + it('throws if XAI_API_KEY is not set when using grokText', () => { + vi.stubEnv('XAI_API_KEY', '') + + expect(() => grokText('grok-3')).toThrow('XAI_API_KEY is required') }) - // Chat Completions API uses 'messages' array - expect(payload.messages).toBeDefined() - expect(Array.isArray(payload.messages)).toBe(true) + it('allows custom baseURL override', () => { + const adapter = createGrokText('grok-3', 'test-api-key', { + baseURL: 'https://custom.api.example.com/v1', + }) - // Verify tools are included - expect(payload.tools).toBeDefined() - expect(Array.isArray(payload.tools)).toBe(true) - expect(payload.tools.length).toBeGreaterThan(0) + expect(adapter).toBeDefined() + }) }) - it('handles 
tool calls in streaming response', async () => { - // Mock the Chat Completions API stream with tool calls - const mockStream = createMockChatCompletionsStream([ - { - id: 'chatcmpl-123', - model: 'grok-3', - choices: [ - { - index: 0, - delta: { - role: 'assistant', - content: null, - tool_calls: [ - { - index: 0, - id: 'call_123', - type: 'function', - function: { - name: 'lookup_weather', - arguments: '', - }, - }, - ], - }, - }, - ], - }, - { - id: 'chatcmpl-123', - model: 'grok-3', - choices: [ - { - index: 0, - delta: { - tool_calls: [ - { - index: 0, - function: { - arguments: '{"location": "Berlin"}', - }, - }, - ], - }, - }, - ], - }, - { - id: 'chatcmpl-123', - model: 'grok-3', - choices: [ - { - index: 0, - delta: {}, - finish_reason: 'tool_calls', - }, - ], - usage: { - prompt_tokens: 12, - completion_tokens: 8, - total_tokens: 20, - }, - }, - ]) - - const chatCompletionsCreate = vi.fn().mockResolvedValueOnce(mockStream) - - const adapter = createAdapter() - ;(adapter as any).client = { - chat: { - completions: { - create: chatCompletionsCreate, - }, - }, - } - - const chunks: StreamChunk[] = [] - for await (const chunk of chat({ - adapter, - messages: [{ role: 'user', content: 'How is the weather in Berlin?' 
}], - tools: [weatherTool], - })) { - chunks.push(chunk) - } - - // Verify we got a tool_call chunk - const toolCallChunks = chunks.filter((c) => c.type === 'tool_call') - expect(toolCallChunks.length).toBeGreaterThan(0) - - // Verify done chunk has tool_calls finish reason - const doneChunk = chunks.find((c) => c.type === 'done') - expect(doneChunk).toBeDefined() - if (doneChunk && doneChunk.type === 'done') { - expect(doneChunk.finishReason).toBe('tool_calls') - } + describe('Image adapter', () => { + it('creates an image adapter with explicit API key', () => { + const adapter = createGrokImage('grok-2-image-1212', 'test-api-key') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('image') + expect(adapter.name).toBe('openai') // Underlying adapter is OpenAI + expect(adapter.model).toBe('grok-2-image-1212') + }) + + it('creates an image adapter from environment variable', () => { + vi.stubEnv('XAI_API_KEY', 'env-api-key') + + const adapter = grokImage('grok-2-image-1212') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('image') + }) + + it('throws if XAI_API_KEY is not set when using grokImage', () => { + vi.stubEnv('XAI_API_KEY', '') + + expect(() => grokImage('grok-2-image-1212')).toThrow('XAI_API_KEY is required') + }) + }) + + describe('Summarize adapter', () => { + it('creates a summarize adapter with explicit API key', () => { + const adapter = createGrokSummarize('grok-3', 'test-api-key') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('summarize') + expect(adapter.name).toBe('openai') // Underlying adapter is OpenAI + expect(adapter.model).toBe('grok-3') + }) + + it('creates a summarize adapter from environment variable', () => { + vi.stubEnv('XAI_API_KEY', 'env-api-key') + + const adapter = grokSummarize('grok-4') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('summarize') + }) + + it('throws if XAI_API_KEY is not set when using grokSummarize', () => { + vi.stubEnv('XAI_API_KEY', '') + + expect(() => 
grokSummarize('grok-3')).toThrow('XAI_API_KEY is required') + }) }) }) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ff102908..687b17b7 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -703,9 +703,9 @@ importers: '@tanstack/ai': specifier: workspace:^ version: link:../ai - openai: - specifier: ^6.9.1 - version: 6.10.0(ws@8.18.3)(zod@4.2.1) + '@tanstack/ai-openai': + specifier: workspace:^ + version: link:../ai-openai zod: specifier: ^4.0.0 version: 4.2.1 @@ -13764,11 +13764,6 @@ snapshots: ws: 8.18.3 zod: 4.1.13 - openai@6.10.0(ws@8.18.3)(zod@4.2.1): - optionalDependencies: - ws: 8.18.3 - zod: 4.2.1 - optionator@0.9.4: dependencies: deep-is: 0.1.4 From 81658f1ee106d47e9283b11f6da302f9e86eca4f Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 23 Dec 2025 23:48:14 +0000 Subject: [PATCH 8/9] ci: apply automated fixes --- packages/typescript/ai-grok/tests/grok-adapter.test.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/typescript/ai-grok/tests/grok-adapter.test.ts b/packages/typescript/ai-grok/tests/grok-adapter.test.ts index 6e28e631..09373f50 100644 --- a/packages/typescript/ai-grok/tests/grok-adapter.test.ts +++ b/packages/typescript/ai-grok/tests/grok-adapter.test.ts @@ -65,7 +65,9 @@ describe('Grok adapters', () => { it('throws if XAI_API_KEY is not set when using grokImage', () => { vi.stubEnv('XAI_API_KEY', '') - expect(() => grokImage('grok-2-image-1212')).toThrow('XAI_API_KEY is required') + expect(() => grokImage('grok-2-image-1212')).toThrow( + 'XAI_API_KEY is required', + ) }) }) From 27b2e8a3c3742646234d16b8dbcfc3fe28da3b1c Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Wed, 24 Dec 2025 11:46:50 +0100 Subject: [PATCH 9/9] update grok models --- docs/adapters/grok.md | 41 ++++--------------- packages/typescript/ai-grok/src/model-meta.ts | 12 +++--- 2 files changed, 14 insertions(+), 39 deletions(-) diff --git a/docs/adapters/grok.md 
b/docs/adapters/grok.md index 8701c153..9e3afe4b 100644 --- a/docs/adapters/grok.md +++ b/docs/adapters/grok.md @@ -19,7 +19,7 @@ import { chat } from "@tanstack/ai"; import { grokText } from "@tanstack/ai-grok"; const stream = chat({ - adapter: grokText("grok-4-0709"), + adapter: grokText("grok-4"), messages: [{ role: "user", content: "Hello!" }], }); ``` @@ -30,7 +30,7 @@ const stream = chat({ import { chat } from "@tanstack/ai"; import { createGrokText } from "@tanstack/ai-grok"; -const adapter = createGrokText("grok-4-0709", process.env.XAI_API_KEY!); +const adapter = createGrokText("grok-4", process.env.XAI_API_KEY!); const stream = chat({ adapter, @@ -47,7 +47,7 @@ const config: Omit = { baseURL: "https://api.x.ai/v1", // Optional, this is the default }; -const adapter = createGrokText("grok-4-0709", process.env.XAI_API_KEY!, config); +const adapter = createGrokText("grok-4", process.env.XAI_API_KEY!, config); ``` ## Example: Chat Completion @@ -60,7 +60,7 @@ export async function POST(request: Request) { const { messages } = await request.json(); const stream = chat({ - adapter: grokText("grok-4-0709"), + adapter: grokText("grok-4"), messages, }); @@ -89,7 +89,7 @@ const getWeather = getWeatherDef.server(async ({ location }) => { }); const stream = chat({ - adapter: grokText("grok-4.1-fast-reasoning"), + adapter: grokText("grok-4-1-fast-reasoning"), messages, tools: [getWeather], }); @@ -101,12 +101,9 @@ Grok supports various provider-specific options: ```typescript const stream = chat({ - adapter: grokText("grok-4-0709"), + adapter: grokText("grok-4"), messages, modelOptions: { - temperature: 0.7, - max_tokens: 4096, - top_p: 0.9, frequency_penalty: 0.5, presence_penalty: 0.5, stop: ["END"], @@ -114,28 +111,6 @@ const stream = chat({ }); ``` -## Available Models - -### Chat/Text Models - -| Model | Context Window | Capabilities | -|-------|----------------|--------------| -| `grok-4.1-fast-reasoning` | 2M tokens | Reasoning, Tool Calling, Structured Outputs 
| -| `grok-4.1-fast-non-reasoning` | 2M tokens | Tool Calling, Structured Outputs | -| `grok-code-fast-1` | 2M tokens | Code-optimized | -| `grok-4-fast-reasoning` | 131K tokens | Reasoning, Tool Calling | -| `grok-4-fast-non-reasoning` | 131K tokens | Tool Calling | -| `grok-4-0709` | 131K tokens | Tool Calling, Structured Outputs | -| `grok-3` | 131K tokens | Tool Calling, Structured Outputs | -| `grok-3-mini` | 131K tokens | Tool Calling, Structured Outputs | -| `grok-2-vision-1212` | 32K tokens | Vision, Tool Calling | - -### Image Models - -| Model | Description | -|-------|-------------| -| `grok-2-image-1212` | Image generation | - ## Summarization Summarize long text content: @@ -145,7 +120,7 @@ import { summarize } from "@tanstack/ai"; import { grokSummarize } from "@tanstack/ai-grok"; const result = await summarize({ - adapter: grokSummarize("grok-4-0709"), + adapter: grokSummarize("grok-4"), text: "Your long text to summarize...", maxLength: 100, style: "concise", // "concise" | "bullet-points" | "paragraph" @@ -203,7 +178,7 @@ Creates a Grok text adapter using environment variables. **Parameters:** -- `model` - The model name (e.g., `'grok-4-0709'`, `'grok-4.1-fast-reasoning'`) +- `model` - The model name (e.g., `'grok-4'`, `'grok-4-1-fast-reasoning'`) - `config.baseURL?` - Custom base URL (optional) **Returns:** A Grok text adapter instance. 
diff --git a/packages/typescript/ai-grok/src/model-meta.ts b/packages/typescript/ai-grok/src/model-meta.ts index ccaf56ef..c097d9ba 100644 --- a/packages/typescript/ai-grok/src/model-meta.ts +++ b/packages/typescript/ai-grok/src/model-meta.ts @@ -24,7 +24,7 @@ interface ModelMeta { } const GROK_4_1_FAST_REASONING = { - name: 'grok-4.1-fast-reasoning', + name: 'grok-4-1-fast-reasoning', context_window: 2_000_000, supports: { input: ['text', 'image'], @@ -43,7 +43,7 @@ const GROK_4_1_FAST_REASONING = { } as const satisfies ModelMeta const GROK_4_1_FAST_NON_REASONING = { - name: 'grok-4.1-fast-non-reasoning', + name: 'grok-4-1-fast-non-reasoning', context_window: 2_000_000, supports: { input: ['text', 'image'], @@ -118,8 +118,8 @@ const GROK_4_FAST_NON_REASONING = { }, } as const satisfies ModelMeta -const GROK_4_0709 = { - name: 'grok-4-0709', +const GROK_4 = { + name: 'grok-4', context_window: 256_000, supports: { input: ['text', 'image'], @@ -219,7 +219,7 @@ export const GROK_CHAT_MODELS = [ GROK_CODE_FAST_1.name, GROK_4_FAST_REASONING.name, GROK_4_FAST_NON_REASONING.name, - GROK_4_0709.name, + GROK_4.name, GROK_3.name, GROK_3_MINI.name, GROK_2_VISION.name, @@ -240,7 +240,7 @@ export type GrokModelInputModalitiesByName = { [GROK_CODE_FAST_1.name]: typeof GROK_CODE_FAST_1.supports.input [GROK_4_FAST_REASONING.name]: typeof GROK_4_FAST_REASONING.supports.input [GROK_4_FAST_NON_REASONING.name]: typeof GROK_4_FAST_NON_REASONING.supports.input - [GROK_4_0709.name]: typeof GROK_4_0709.supports.input + [GROK_4.name]: typeof GROK_4.supports.input [GROK_3.name]: typeof GROK_3.supports.input [GROK_3_MINI.name]: typeof GROK_3_MINI.supports.input [GROK_2_VISION.name]: typeof GROK_2_VISION.supports.input