From fbe9b9004f44ced244ae2f7117ad126bd91efc70 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Mon, 9 Mar 2026 11:43:33 +0100 Subject: [PATCH 1/9] add description attribute to tool calls --- .../suites/tracing/vercelai/scenario.mjs | 1 + .../suites/tracing/vercelai/test.ts | 5 ++- .../suites/tracing/vercelai/v5/scenario.mjs | 1 + .../suites/tracing/vercelai/v5/test.ts | 5 ++- .../suites/tracing/vercelai/v6/scenario.mjs | 1 + .../suites/tracing/vercelai/v6/test.ts | 5 ++- .../core/src/tracing/ai/gen-ai-attributes.ts | 6 +++ packages/core/src/tracing/vercel-ai/index.ts | 41 +++++++++++++++++-- 8 files changed, 58 insertions(+), 7 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs index 9bfdd4a9793a..b6abe6fdf673 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs @@ -51,6 +51,7 @@ async function run() { }), tools: { getWeather: { + description: 'Get the current weather for a location', parameters: z.object({ location: z.string() }), execute: async args => { return `Weather in ${args.location}: Sunny, 72°F`; diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index 2919815b8f0d..f57d46f02c16 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -15,6 +15,7 @@ import { GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, @@ -204,6 +205,7 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + 
[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', @@ -220,7 +222,7 @@ describe('Vercel AI integration', () => { }; const EXPECTED_AVAILABLE_TOOLS_JSON = - '[{"type":"function","name":"getWeather","parameters":{"type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]'; + '[{"type":"function","name":"getWeather","description":"Get the current weather for a location","parameters":{"type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]'; const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { transaction: 'main', @@ -447,6 +449,7 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs index 9ef1b8000741..2c83234064ae 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs @@ -47,6 +47,7 @@ async function run() { }), tools: { getWeather: tool({ + description: 'Get the current weather for a location', inputSchema: z.object({ location: z.string() }), execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`, }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts 
b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 7d981a878363..5365d9425ce4 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -14,6 +14,7 @@ import { GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, @@ -200,6 +201,7 @@ describe('Vercel AI integration (V5)', () => { data: { 'vercel.ai.operationId': 'ai.toolCall', [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', @@ -215,7 +217,7 @@ describe('Vercel AI integration (V5)', () => { }; const EXPECTED_AVAILABLE_TOOLS_JSON = - '[{"type":"function","name":"getWeather","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]'; + '[{"type":"function","name":"getWeather","description":"Get the current weather for a location","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]'; const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { transaction: 'main', @@ -406,6 +408,7 @@ describe('Vercel AI integration (V5)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String), diff --git 
a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/scenario.mjs index 66233d1dabe5..ee2dc802cd9c 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/scenario.mjs +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/scenario.mjs @@ -62,6 +62,7 @@ async function run() { }), tools: { getWeather: tool({ + description: 'Get the current weather for a location', inputSchema: z.object({ location: z.string() }), execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`, }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 2a213f39410d..110d0aeab5a4 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -13,6 +13,7 @@ import { GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, @@ -203,6 +204,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', @@ -218,7 +220,7 @@ describe('Vercel AI integration (V6)', () => { }; const EXPECTED_AVAILABLE_TOOLS_JSON = - '[{"type":"function","name":"getWeather","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]'; + 
'[{"type":"function","name":"getWeather","description":"Get the current weather for a location","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]'; const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { transaction: 'main', @@ -409,6 +411,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String), diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts index dc88e6315852..bfe81dfd57c5 100644 --- a/packages/core/src/tracing/ai/gen-ai-attributes.ts +++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts @@ -269,6 +269,12 @@ export const GEN_AI_TOOL_INPUT_ATTRIBUTE = 'gen_ai.tool.input'; */ export const GEN_AI_TOOL_OUTPUT_ATTRIBUTE = 'gen_ai.tool.output'; +/** + * The description of the tool being used + * @see https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-description + */ +export const GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE = 'gen_ai.tool.description'; + // ============================================================================= // OPENAI-SPECIFIC ATTRIBUTES // ============================================================================= diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 919c06eb12d6..2f35178e759c 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -7,9 +7,11 @@ import { spanToJSON } from '../../utils/spanUtils'; import { GEN_AI_INPUT_MESSAGES_ATTRIBUTE, 
GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, @@ -124,13 +126,21 @@ function vercelAiEventProcessor(event: Event): Event { accumulateTokensForParent(span, tokenAccumulator); } - // Second pass: apply accumulated token data to parent spans + // Second pass: apply tool descriptions and accumulated tokens for (const span of event.spans) { - if (span.op !== 'gen_ai.invoke_agent') { - continue; + if (span.op === 'gen_ai.execute_tool') { + const toolName = span.data[GEN_AI_TOOL_NAME_ATTRIBUTE]; + if (typeof toolName === 'string') { + const description = findToolDescription(event.spans, toolName); + if (description) { + span.data[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE] = description; + } + } } - applyAccumulatedTokens(span, tokenAccumulator); + if (span.op === 'gen_ai.invoke_agent') { + applyAccumulatedTokens(span, tokenAccumulator); + } } // Also apply to root when it is the invoke_agent pipeline @@ -142,6 +152,29 @@ function vercelAiEventProcessor(event: Event): Event { return event; } + +/** + * Finds a tool description by scanning spans for gen_ai.request.available_tools + * (already processed from ai.prompt.tools in the first pass). + */ +function findToolDescription(spans: SpanJSON[], toolName: string): string | undefined { + for (const span of spans) { + const availableTools = span.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]; + if (typeof availableTools !== 'string') { + continue; + } + try { + const tools = JSON.parse(availableTools) as Array<{ name?: string; description?: string }>; + const tool = tools.find(t => t.name === toolName); + if (tool?.description) { + return tool.description; + } + } catch { + // ignore + } + } + return undefined; +} /** * Post-process spans emitted by the Vercel AI SDK. 
*/ From 8aec90bf0bd79cc4f91d3f5ef2edf5e1ccabbc3f Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Mon, 9 Mar 2026 15:36:49 +0100 Subject: [PATCH 2/9] update with GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE --- .../core/src/tracing/ai/gen-ai-attributes.ts | 8 ++ packages/core/src/tracing/vercel-ai/index.ts | 83 +++++++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts index bfe81dfd57c5..4f8d4b0161c2 100644 --- a/packages/core/src/tracing/ai/gen-ai-attributes.ts +++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts @@ -126,6 +126,14 @@ export const GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE = 'sentry.sdk_meta. */ export const GEN_AI_INPUT_MESSAGES_ATTRIBUTE = 'gen_ai.input.messages'; +/** + * The model's response messages including text and tool calls + * Only recorded when recordOutputs is enabled + * Format: stringified array of message objects with role, parts, and finish_reason + * @see https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-output-messages + */ +export const GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE = 'gen_ai.output.messages'; + /** * The system instructions extracted from system messages * Only recorded when recordInputs is enabled diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 2f35178e759c..7ecdd573b46a 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -7,6 +7,7 @@ import { spanToJSON } from '../../utils/spanUtils'; import { GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, @@ -44,6 +45,7 @@ import { AI_OPERATION_ID_ATTRIBUTE, AI_PROMPT_MESSAGES_ATTRIBUTE, AI_PROMPT_TOOLS_ATTRIBUTE, + AI_RESPONSE_FINISH_REASON_ATTRIBUTE, 
AI_RESPONSE_OBJECT_ATTRIBUTE, AI_RESPONSE_PROVIDER_METADATA_ATTRIBUTE, AI_RESPONSE_TEXT_ATTRIBUTE, @@ -175,6 +177,83 @@ function findToolDescription(spans: SpanJSON[], toolName: string): string | unde } return undefined; } + +/** + * Tool call structure from Vercel AI SDK + * Note: Vercel AI uses 'input' for arguments in ai.response.toolCalls + */ +interface VercelToolCall { + toolCallId: string; + toolName: string; + input: Record<string, unknown>; +} + +/** + * Build gen_ai.output.messages from ai.response.text and/or ai.response.toolCalls + * + * Format follows OpenTelemetry semantic conventions: + * [{"role": "assistant", "parts": [...], "finish_reason": "stop"}] + * + * Parts can be: + * - {"type": "text", "content": "..."} + * - {"type": "tool_call", "id": "...", "name": "...", "arguments": "..."} + */ +function buildOutputMessages(attributes: Record<string, unknown>): void { + const responseText = attributes[AI_RESPONSE_TEXT_ATTRIBUTE]; + const responseToolCalls = attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]; + const finishReason = attributes[AI_RESPONSE_FINISH_REASON_ATTRIBUTE]; + + // Skip if neither text nor tool calls are present + if (responseText == null && responseToolCalls == null) { + return; + } + + const parts: Array<Record<string, unknown>> = []; + + // Add text part if present + if (typeof responseText === 'string' && responseText.length > 0) { + parts.push({ + type: 'text', + content: responseText, + }); + } + + // Add tool call parts if present + if (responseToolCalls != null) { + try { + // Tool calls can be a string (JSON) or already parsed array + const toolCalls: VercelToolCall[] = + typeof responseToolCalls === 'string' ? JSON.parse(responseToolCalls) : responseToolCalls; + + if (Array.isArray(toolCalls)) { + for (const toolCall of toolCalls) { + // Vercel AI SDK uses 'input' for tool call arguments + const args = toolCall.input; + parts.push({ + type: 'tool_call', + id: toolCall.toolCallId, + name: toolCall.toolName, + arguments: typeof args === 'string' ?
args : JSON.stringify(args), + }); + } + } + } catch { + // Ignore parsing errors + } + } + + // Only set if we have parts + if (parts.length > 0) { + const outputMessage = { + role: 'assistant', + parts, + finish_reason: typeof finishReason === 'string' ? finishReason : 'stop', + }; + + attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE] = JSON.stringify([outputMessage]); + } +} + /** * Post-process spans emitted by the Vercel AI SDK. */ @@ -229,6 +308,10 @@ function processEndedVercelAiSpan(span: SpanJSON): void { delete attributes[OPERATION_NAME_ATTRIBUTE]; } renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE); + + // Build gen_ai.output.messages from response text and/or tool calls + buildOutputMessages(attributes); + renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object'); From 29459934702fe3bda7f1a99ce81ef083a6438626 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Mon, 9 Mar 2026 15:42:03 +0100 Subject: [PATCH 3/9] check for vercel media types --- .../core/src/tracing/ai/mediaStripping.ts | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/packages/core/src/tracing/ai/mediaStripping.ts b/packages/core/src/tracing/ai/mediaStripping.ts index f4870cd5a9de..c88d11bee956 100644 --- a/packages/core/src/tracing/ai/mediaStripping.ts +++ b/packages/core/src/tracing/ai/mediaStripping.ts @@ -47,6 +47,8 @@ export function isContentMedia(part: unknown): part is ContentMedia { hasInputAudio(part) || hasFileData(part) || hasMediaTypeData(part) || + hasVercelFileData(part) || + hasVercelImageData(part) || hasBlobOrBase64Type(part) || hasB64Json(part) || hasImageGenerationResult(part) || @@ -113,6 +115,34 @@ function hasMediaTypeData(part: NonNullable): part is { media_type: str return 'media_type' in 
part && typeof part.media_type === 'string' && 'data' in part; } +/** + * Check for Vercel AI SDK file format: { type: "file", mediaType: "...", data: "..." } + */ +function hasVercelFileData(part: NonNullable<unknown>): part is { type: 'file'; mediaType: string; data: string } { + return ( + 'type' in part && + part.type === 'file' && + 'mediaType' in part && + typeof part.mediaType === 'string' && + 'data' in part && + typeof part.data === 'string' + ); +} + +/** + * Check for Vercel AI SDK image format: { type: "image", image: "base64...", mimeType: "..." } + */ +function hasVercelImageData(part: NonNullable<unknown>): part is { type: 'image'; image: string; mimeType: string } { + return ( + 'type' in part && + part.type === 'image' && + 'image' in part && + typeof part.image === 'string' && + 'mimeType' in part && + typeof part.mimeType === 'string' + ); +} + function hasBlobOrBase64Type(part: NonNullable<unknown>): part is { type: 'blob' | 'base64'; content: string } { return 'type' in part && (part.type === 'blob' || part.type === 'base64'); } @@ -131,7 +161,7 @@ function hasDataUri(part: NonNullable<unknown>): part is { uri: string } { const REMOVED_STRING = '[Blob substitute]'; -const MEDIA_FIELDS = ['image_url', 'data', 'content', 'b64_json', 'result', 'uri'] as const; +const MEDIA_FIELDS = ['image_url', 'data', 'content', 'b64_json', 'result', 'uri', 'image'] as const; /** * Replace inline binary data in a single media content part with a placeholder.
From 050087584b2eff8e58ce5a8cdb5b8977f566894e Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Mon, 9 Mar 2026 15:55:08 +0100 Subject: [PATCH 4/9] fix media type --- .../core/src/tracing/ai/messageTruncation.ts | 50 ++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/packages/core/src/tracing/ai/messageTruncation.ts b/packages/core/src/tracing/ai/messageTruncation.ts index 499d25ee6e47..aaa49a0b11df 100644 --- a/packages/core/src/tracing/ai/messageTruncation.ts +++ b/packages/core/src/tracing/ai/messageTruncation.ts @@ -229,11 +229,55 @@ function truncatePartsMessage(message: PartsMessage, maxBytes: number): unknown[ } } +/** + * Truncate a message with `content: [...]` array format (Vercel AI SDK, OpenAI multimodal). + * Content arrays contain parts like `{ type: "text", text: "..." }`. + * + * @param message - Message with content array property + * @param maxBytes - Maximum byte limit + * @returns Array with truncated message, or empty array if it doesn't fit + */ +function truncateContentArrayMessage(message: ContentArrayMessage, maxBytes: number): unknown[] { + const { content } = message; + + // Find the first text part to truncate + const textPartIndex = content.findIndex( + part => part && typeof part === 'object' && 'type' in part && part.type === 'text' && 'text' in part, + ); + + if (textPartIndex === -1) { + // No text part found, cannot truncate safely + return []; + } + + const textPart = content[textPartIndex] as { type: string; text: string }; + + // Calculate overhead (message structure with empty text) + const emptyContent = content.map((part, i) => + i === textPartIndex ? 
{ ...textPart, text: '' } : part, + ); + const emptyMessage = { ...message, content: emptyContent }; + const overhead = jsonBytes(emptyMessage); + const availableForText = maxBytes - overhead; + + if (availableForText <= 0) { + return []; + } + + const truncatedText = truncateTextByBytes(textPart.text, availableForText); + const truncatedContent = content.map((part, i) => + i === textPartIndex ? { ...textPart, text: truncatedText } : part, + ); + + return [{ ...message, content: truncatedContent }]; +} + /** * Truncate a single message to fit within maxBytes. * - * Supports two message formats: + * Supports three message formats: * - OpenAI/Anthropic: `{ ..., content: string }` + * - Vercel AI/OpenAI multimodal: `{ ..., content: Array<{type, text?, ...}> }` * - Google GenAI: `{ ..., parts: Array }` * * @param message - The message to truncate @@ -257,6 +301,10 @@ function truncateSingleMessage(message: unknown, maxBytes: number): unknown[] { return truncateContentMessage(message, maxBytes); } + if (isContentArrayMessage(message)) { + return truncateContentArrayMessage(message, maxBytes); + } + if (isPartsMessage(message)) { return truncatePartsMessage(message, maxBytes); } From f28653085bef40b052ed34d552f0a7d661b73cb0 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Tue, 10 Mar 2026 11:30:47 +0100 Subject: [PATCH 5/9] deprecate other attributes --- .../suites/tracing/vercelai/test.ts | 33 ++++++++++--------- .../suites/tracing/vercelai/v5/test.ts | 30 ++++++++++------- .../suites/tracing/vercelai/v6/test.ts | 13 +------- .../core/src/tracing/ai/messageTruncation.ts | 4 +-- packages/core/src/tracing/vercel-ai/index.ts | 8 +++-- 5 files changed, 44 insertions(+), 44 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index f57d46f02c16..c0f9c662bd59 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ 
b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -5,13 +5,12 @@ import { GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, - GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, @@ -92,9 +91,10 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, @@ -120,11 +120,12 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -202,10 +203,10 @@ 
describe('Vercel AI integration', () => { status: 'ok', }), // Seventh span - tool call execution span + // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded expect.objectContaining({ data: { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', - [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', @@ -232,9 +233,10 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, @@ -266,11 +268,12 @@ describe('Vercel AI integration', () => { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -304,9 +307,10 @@ describe('Vercel AI integration', () 
=> { data: { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, @@ -337,11 +341,12 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -375,10 +380,10 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather"}],"finish_reason":"tool-calls"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, @@ -410,12 +415,12 @@ describe('Vercel AI integration', () => { [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather"}],"finish_reason":"tool-calls"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, @@ -812,7 +817,6 @@ describe('Vercel AI integration', () => { data: expect.objectContaining({ [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"(?:text|content)":"C+".*\]$/), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to truncated messages', }), }), // Second call: Last message is small and kept intact @@ -822,7 +826,6 @@ describe('Vercel AI integration', () => { [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining( 'This is a small message that fits within the limit', ), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to small message', }), }), ]), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 5365d9425ce4..2c81a869cd0d 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ 
b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -5,13 +5,12 @@ import { GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, - GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, @@ -94,7 +93,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, @@ -128,7 +128,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), @@ -197,11 +198,11 @@ describe('Vercel AI integration (V5)', () => { status: 'ok', }), // Seventh span - tool call execution span + // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded expect.objectContaining({ data: { 
'vercel.ai.operationId': 'ai.toolCall', [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', - [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', @@ -232,8 +233,9 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]', [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -259,10 +261,11 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -292,8 +295,9 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]', [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + 
'[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -325,7 +329,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), @@ -351,8 +356,9 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]', [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]', 'vercel.ai.response.finishReason': 'tool-calls', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -377,14 +383,14 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.pipeline.name': 'generateText.doGenerate', [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + 
'[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]', 'vercel.ai.prompt.toolChoice': expect.any(String), [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 110d0aeab5a4..8cee5653128b 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -9,8 +9,6 @@ import { GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, - GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, @@ -96,7 +94,6 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.request.headers.user-agent': expect.any(String), 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', @@ -130,7 +127,6 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'stop', 
'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], @@ -200,11 +196,11 @@ describe('Vercel AI integration (V6)', () => { status: 'ok', }), // Seventh span - tool call execution span + // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded expect.objectContaining({ data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', - [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', @@ -236,7 +232,6 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -265,7 +260,6 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -296,7 +290,6 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', - 
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -329,7 +322,6 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], @@ -355,7 +347,6 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -385,9 +376,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], diff --git a/packages/core/src/tracing/ai/messageTruncation.ts b/packages/core/src/tracing/ai/messageTruncation.ts index aaa49a0b11df..aade37b84474 100644 --- a/packages/core/src/tracing/ai/messageTruncation.ts +++ b/packages/core/src/tracing/ai/messageTruncation.ts @@ -253,9 +253,7 @@ function truncateContentArrayMessage(message: ContentArrayMessage, maxBytes: num const textPart = 
content[textPartIndex] as { type: string; text: string }; // Calculate overhead (message structure with empty text) - const emptyContent = content.map((part, i) => - i === textPartIndex ? { ...textPart, text: '' } : part, - ); + const emptyContent = content.map((part, i) => (i === textPartIndex ? { ...textPart, text: '' } : part)); const emptyMessage = { ...message, content: emptyContent }; const overhead = jsonBytes(emptyMessage); const availableForText = maxBytes - overhead; diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 7ecdd573b46a..c96f853b35d0 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -312,8 +312,12 @@ function processEndedVercelAiSpan(span: SpanJSON): void { // Build gen_ai.output.messages from response text and/or tool calls buildOutputMessages(attributes); - renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); - renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); + // Remove the source attributes since they're now captured in gen_ai.output.messages + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[AI_RESPONSE_TEXT_ATTRIBUTE]; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]; + renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object'); renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); From 3b1e3540d7d9679e5543c71aa68d6bb68e61bf6b Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Tue, 10 Mar 2026 11:36:24 +0100 Subject: [PATCH 6/9] bump size --- .size-limit.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.size-limit.js b/.size-limit.js index 38a83445d021..5bf0e534fbda 100644 --- a/.size-limit.js +++ b/.size-limit.js @@ -326,7 +326,7 @@ module.exports = [ 
import: createImport('init'), ignore: [...builtinModules, ...nodePrefixedBuiltinModules], gzip: true, - limit: '175 KB', + limit: '176 KB', }, { name: '@sentry/node - without tracing', From 0ead8e1859769ab027c6715f13248cc3e96f2dfa Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Tue, 10 Mar 2026 12:00:36 +0100 Subject: [PATCH 7/9] fix nextjs test s --- .../test-applications/nextjs-15/tests/ai-test.test.ts | 4 ++-- .../test-applications/nextjs-16/tests/ai-test.test.ts | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts index 9fd05f83c5f9..a53f8986512a 100644 --- a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts +++ b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts @@ -34,14 +34,14 @@ test('should create AI spans with correct attributes', async ({ page }) => { expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id'); expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider'); expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?'); - expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!'); + expect(firstPipelineSpan?.data?.['gen_ai.output.messages']).toContain('First span here!'); expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10); expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */ // Second AI call - explicitly enabled telemetry const secondPipelineSpan = aiPipelineSpans[0]; expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?'); - expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!'); + expect(secondPipelineSpan?.data?.['gen_ai.output.messages']).toContain('Second span here!'); // Third AI call - with tool calls /* const 
thirdPipelineSpan = aiPipelineSpans[2]; diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts index f7dc95e7d00d..5c519cb89a03 100644 --- a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts @@ -34,14 +34,14 @@ test('should create AI spans with correct attributes', async ({ page }) => { expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id'); expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider'); expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?'); - expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!'); + expect(firstPipelineSpan?.data?.['gen_ai.output.messages']).toContain('First span here!'); expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10); expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */ // Second AI call - explicitly enabled telemetry const secondPipelineSpan = aiPipelineSpans[0]; expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?'); - expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!'); + expect(secondPipelineSpan?.data?.['gen_ai.output.messages']).toContain('Second span here!'); // Third AI call - with tool calls /* const thirdPipelineSpan = aiPipelineSpans[2]; From 32a90a45c17a9ca1fa1a5f1f821fd9acbf4a37f6 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Tue, 10 Mar 2026 13:07:49 +0100 Subject: [PATCH 8/9] resolve pr review comments --- .../suites/tracing/vercelai/v6/test.ts | 17 +++++++++++++++++ packages/core/src/tracing/ai/mediaStripping.ts | 4 ++++ packages/core/src/tracing/vercel-ai/index.ts | 16 +++++++++------- 3 files changed, 30 insertions(+), 7 deletions(-) diff --git 
a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 8cee5653128b..c077c1be2f76 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -4,6 +4,7 @@ import { afterAll, describe, expect } from 'vitest'; import { GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, @@ -97,6 +98,8 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -129,6 +132,8 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -231,6 +236,8 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + 
'[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -257,6 +264,8 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', @@ -289,6 +298,8 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -324,6 +335,8 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -346,6 +359,8 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.request.headers.user-agent': 
expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]', 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -371,6 +386,8 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]', 'vercel.ai.prompt.toolChoice': expect.any(String), [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', diff --git a/packages/core/src/tracing/ai/mediaStripping.ts b/packages/core/src/tracing/ai/mediaStripping.ts index c88d11bee956..b206a1ef2058 100644 --- a/packages/core/src/tracing/ai/mediaStripping.ts +++ b/packages/core/src/tracing/ai/mediaStripping.ts @@ -131,6 +131,7 @@ function hasVercelFileData(part: NonNullable): part is { type: 'file'; /** * Check for Vercel AI SDK image format: { type: "image", image: "base64...", mimeType: "..." } + * Only matches base64/data URIs, not HTTP/HTTPS URLs (which should be preserved). 
*/ function hasVercelImageData(part: NonNullable): part is { type: 'image'; image: string; mimeType: string } { return ( @@ -138,6 +139,9 @@ function hasVercelImageData(part: NonNullable): part is { type: 'image' part.type === 'image' && 'image' in part && typeof part.image === 'string' && + // Only strip base64/data URIs, not HTTP/HTTPS URLs which should be preserved as references + !part.image.startsWith('http://') && + !part.image.startsWith('https://') && 'mimeType' in part && typeof part.mimeType === 'string' ); diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index c96f853b35d0..9a9237f8e642 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -242,7 +242,8 @@ function buildOutputMessages(attributes: Record): void { } } - // Only set if we have parts + // Only set output messages and delete source attributes if we have parts + // This ensures we don't lose telemetry data if parsing fails if (parts.length > 0) { const outputMessage = { role: 'assistant', @@ -251,6 +252,12 @@ function buildOutputMessages(attributes: Record): void { }; attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE] = JSON.stringify([outputMessage]); + + // Remove the source attributes since they're now captured in gen_ai.output.messages + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[AI_RESPONSE_TEXT_ATTRIBUTE]; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]; } } @@ -310,14 +317,9 @@ function processEndedVercelAiSpan(span: SpanJSON): void { renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE); // Build gen_ai.output.messages from response text and/or tool calls + // Note: buildOutputMessages also removes the source attributes when output is successfully generated buildOutputMessages(attributes); - // Remove the source attributes 
since they're now captured in gen_ai.output.messages - // eslint-disable-next-line @typescript-eslint/no-dynamic-delete - delete attributes[AI_RESPONSE_TEXT_ATTRIBUTE]; - // eslint-disable-next-line @typescript-eslint/no-dynamic-delete - delete attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]; - renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object'); renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); From 79fa7699dae07b10a5c89dff8a64e8a6ca4d1143 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Tue, 10 Mar 2026 13:44:06 +0100 Subject: [PATCH 9/9] normalize message to follow otel spec --- .../suites/tracing/vercelai/test.ts | 4 +- .../suites/tracing/vercelai/v5/test.ts | 4 +- .../suites/tracing/vercelai/v6/test.ts | 4 +- .../core/src/tracing/ai/mediaStripping.ts | 15 +++++--- packages/core/src/tracing/vercel-ai/index.ts | 37 ++++++++++++++++--- 5 files changed, 47 insertions(+), 17 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index c0f9c662bd59..809ba2308622 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -381,7 +381,7 @@ describe('Vercel AI integration', () => { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: - '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather"}],"finish_reason":"tool-calls"}]', + '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{ \\"location\\": \\"San Francisco\\" }"}],"finish_reason":"tool_call"}]', 
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, @@ -416,7 +416,7 @@ describe('Vercel AI integration', () => { [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: - '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather"}],"finish_reason":"tool-calls"}]', + '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{ \\"location\\": \\"San Francisco\\" }"}],"finish_reason":"tool_call"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 2c81a869cd0d..a84b80e9abc5 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -357,7 +357,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: - '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]', + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]', 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -384,7 
+384,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: - '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]', + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]', 'vercel.ai.prompt.toolChoice': expect.any(String), [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index c077c1be2f76..39ee00254373 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -360,7 +360,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: - '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]', + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]', 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -387,7 +387,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), 
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: - '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]', + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]', 'vercel.ai.prompt.toolChoice': expect.any(String), [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', diff --git a/packages/core/src/tracing/ai/mediaStripping.ts b/packages/core/src/tracing/ai/mediaStripping.ts index b206a1ef2058..cb8e5d7b959e 100644 --- a/packages/core/src/tracing/ai/mediaStripping.ts +++ b/packages/core/src/tracing/ai/mediaStripping.ts @@ -117,6 +117,7 @@ function hasMediaTypeData(part: NonNullable): part is { media_type: str /** * Check for Vercel AI SDK file format: { type: "file", mediaType: "...", data: "..." } + * Only matches base64/binary data, not HTTP/HTTPS URLs (which should be preserved). */ function hasVercelFileData(part: NonNullable): part is { type: 'file'; mediaType: string; data: string } { return ( @@ -125,15 +126,19 @@ function hasVercelFileData(part: NonNullable): part is { type: 'file'; 'mediaType' in part && typeof part.mediaType === 'string' && 'data' in part && - typeof part.data === 'string' + typeof part.data === 'string' && + // Only strip base64/binary data, not HTTP/HTTPS URLs which should be preserved as references + !part.data.startsWith('http://') && + !part.data.startsWith('https://') ); } /** - * Check for Vercel AI SDK image format: { type: "image", image: "base64...", mimeType: "..." } + * Check for Vercel AI SDK image format: { type: "image", image: "base64...", mimeType?: "..." } * Only matches base64/data URIs, not HTTP/HTTPS URLs (which should be preserved). + * Note: mimeType is optional in Vercel AI SDK image parts. 
*/ -function hasVercelImageData(part: NonNullable): part is { type: 'image'; image: string; mimeType: string } { +function hasVercelImageData(part: NonNullable): part is { type: 'image'; image: string; mimeType?: string } { return ( 'type' in part && part.type === 'image' && @@ -141,9 +146,7 @@ function hasVercelImageData(part: NonNullable): part is { type: 'image' typeof part.image === 'string' && // Only strip base64/data URIs, not HTTP/HTTPS URLs which should be preserved as references !part.image.startsWith('http://') && - !part.image.startsWith('https://') && - 'mimeType' in part && - typeof part.mimeType === 'string' + !part.image.startsWith('https://') ); } diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 9a9237f8e642..8b62079295af 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -180,12 +180,39 @@ function findToolDescription(spans: SpanJSON[], toolName: string): string | unde /** * Tool call structure from Vercel AI SDK - * Note: Vercel AI uses 'input' for arguments in ai.response.toolCalls + * Note: V5/V6 use 'input' for arguments, V4 and earlier use 'args' */ interface VercelToolCall { toolCallId: string; toolName: string; - input: Record; + input?: Record | string; // V5/V6 + args?: string; // V4 and earlier +} + +/** + * Normalize finish reason to match OpenTelemetry semantic conventions. + * Valid values: "stop", "length", "content_filter", "tool_call", "error" + * + * Vercel AI SDK uses "tool-calls" (plural, with hyphen) which we map to "tool_call". 
+ */ +function normalizeFinishReason(finishReason: unknown): string { + if (typeof finishReason !== 'string') { + return 'stop'; + } + + // Map Vercel AI SDK finish reasons to OpenTelemetry semantic convention values + switch (finishReason) { + case 'tool-calls': + return 'tool_call'; + case 'stop': + case 'length': + case 'content_filter': + case 'error': + return finishReason; + default: + // For unknown values, return as-is (schema allows arbitrary strings) + return finishReason; + } } /** @@ -227,8 +254,8 @@ function buildOutputMessages(attributes: Record): void { if (Array.isArray(toolCalls)) { for (const toolCall of toolCalls) { - // Vercel AI SDK uses 'input' for tool call arguments - const args = toolCall.input; + // V5/V6 use 'input', V4 and earlier use 'args' + const args = toolCall.input ?? toolCall.args; parts.push({ type: 'tool_call', id: toolCall.toolCallId, @@ -248,7 +275,7 @@ function buildOutputMessages(attributes: Record): void { const outputMessage = { role: 'assistant', parts, - finish_reason: typeof finishReason === 'string' ? finishReason : 'stop', + finish_reason: normalizeFinishReason(finishReason), }; attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE] = JSON.stringify([outputMessage]);