From abce6f5f6e61ce576194d3c497b0e4c1d154cf28 Mon Sep 17 00:00:00 2001
From: isaacs
Date: Tue, 25 Nov 2025 22:31:32 -0800
Subject: [PATCH 1/4] fix(tracing): Add missing attributes in vercel-ai spans (#18333)

Also, sort the fields in the integration test, in order to more easily see which fields were missing/misnamed in the fixture objects.

Fix JS-1216
---
 .../suites/tracing/vercelai/test.ts           | 401 ++++++++++--------
 packages/core/src/tracing/vercel-ai/index.ts  |  12 +-
 packages/core/src/tracing/vercel-ai/utils.ts  |  37 ++
 .../test/lib/utils/vercelai-utils.test.ts     |  55 +++
 4 files changed, 319 insertions(+), 186 deletions(-)
 create mode 100644 packages/core/test/lib/utils/vercelai-utils.test.ts

diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index 04ff4a0ac52c..afff2b1f8f28 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -13,14 +13,7 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.response.finishReason': 'stop',
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -28,6 +21,13 @@
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
@@ -37,27 +37,26 @@
       // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
       expect.objectContaining({
         data: {
-          'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'gen_ai.generate_text',
+          'gen_ai.request.model': 'mock-model-id',
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.response.id': expect.any(String),
+          'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText.doGenerate',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
+          'sentry.origin': 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.settings.maxRetries': 2,
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.request.model': 'mock-model-id',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.streaming': false,
           'vercel.ai.response.finishReason': 'stop',
-          'vercel.ai.response.model': 'mock-model-id',
           'vercel.ai.response.id': expect.any(String),
+
'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -67,24 +66,25 @@ describe('Vercel AI integration', () => { // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText', - 'vercel.ai.pipeline.name': 'generateText', - 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', - 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.settings.maxSteps': 1, - 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', + 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.response.text': expect.any(String), 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'operation.name': 'ai.generateText', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.settings.maxSteps': 1, + 'vercel.ai.streaming': false, }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -94,30 +94,29 @@ describe('Vercel AI integration', () => { // Fourth span - doGenerate for explicit telemetry enabled call expect.objectContaining({ data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', + 'gen_ai.request.messages': expect.any(String), + 'gen_ai.request.model': 'mock-model-id', + 'gen_ai.response.finish_reasons': ['stop'], + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.response.text': expect.any(String), + 'gen_ai.system': 'mock-provider', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, 'operation.name': 'ai.generateText.doGenerate', - 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'sentry.op': 'gen_ai.generate_text', + 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'vercel.ai.streaming': false, + 'vercel.ai.prompt.format': expect.any(String), 'vercel.ai.response.finishReason': 'stop', - 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.timestamp': expect.any(String), - 
'vercel.ai.prompt.format': expect.any(String), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -127,14 +126,7 @@ describe('Vercel AI integration', () => { // Fifth span - tool call generateText span expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText', - 'vercel.ai.pipeline.name': 'generateText', - 'vercel.ai.response.finishReason': 'tool-calls', - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.settings.maxSteps': 1, - 'vercel.ai.streaming': false, + 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.model': 'mock-model-id', 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, @@ -142,6 +134,13 @@ describe('Vercel AI integration', () => { 'operation.name': 'ai.generateText', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.settings.maxSteps': 1, + 'vercel.ai.streaming': false, }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -151,16 +150,6 @@ describe('Vercel AI integration', () => { // Sixth span - tool call doGenerate span expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText.doGenerate', - 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'vercel.ai.response.finishReason': 'tool-calls', - 'vercel.ai.response.id': expect.any(String), - 'vercel.ai.response.model': 'mock-model-id', - 'vercel.ai.response.timestamp': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.streaming': false, 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.finish_reasons': ['tool-calls'], 'gen_ai.response.id': expect.any(String), @@ -172,6 +161,15 @@ describe('Vercel AI integration', () => { 'operation.name': 'ai.generateText.doGenerate', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.timestamp': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -181,13 +179,13 @@ describe('Vercel AI integration', () => { // Seventh span - tool call execution span expect.objectContaining({ data: { - 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'operation.name': 'ai.toolCall', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.operationId': 'ai.toolCall', }, description: 'execute_tool getWeather', op: 
'gen_ai.execute_tool', @@ -206,50 +204,45 @@ describe('Vercel AI integration', () => { // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText', - 'vercel.ai.pipeline.name': 'generateText', - 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', - 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': 'First span here!', - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.settings.maxSteps': 1, - 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the first span?"}', + 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]', + 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.response.text': 'First span here!', 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'operation.name': 'ai.generateText', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.settings.maxSteps': 1, + 'vercel.ai.streaming': false, }, description: 'generateText', op: 'gen_ai.invoke_agent', origin: 'auto.vercelai.otel', status: 'ok', + parent_span_id: expect.any(String), + span_id: expect.any(String), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + trace_id: expect.any(String), }), // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText.doGenerate', - 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'vercel.ai.prompt.format': 'prompt', 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', - 'vercel.ai.response.finishReason': 'stop', - 'vercel.ai.response.id': expect.any(String), - 'vercel.ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'First span here!', - 'vercel.ai.response.timestamp': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.streaming': false, 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.finish_reasons': ['stop'], 'gen_ai.response.id': expect.any(String), 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.response.text': 'First span here!', 'gen_ai.system': 'mock-provider', 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, @@ -257,123 +250,142 @@ describe('Vercel AI integration', () => { 'operation.name': 'ai.generateText.doGenerate', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.prompt.format': 'prompt', + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.timestamp': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 
'vercel.ai.streaming': false, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', origin: 'auto.vercelai.otel', status: 'ok', + parent_span_id: expect.any(String), + span_id: expect.any(String), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + trace_id: expect.any(String), }), // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText', - 'vercel.ai.pipeline.name': 'generateText', - 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', - 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.settings.maxSteps': 1, - 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', + 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.response.text': expect.any(String), 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'operation.name': 'ai.generateText', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.settings.maxSteps': 1, + 'vercel.ai.streaming': false, }, description: 'generateText', op: 'gen_ai.invoke_agent', origin: 'auto.vercelai.otel', status: 'ok', + parent_span_id: expect.any(String), + span_id: expect.any(String), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + trace_id: expect.any(String), }), // Fourth span - doGenerate for explicitly enabled telemetry call expect.objectContaining({ data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', + 'gen_ai.request.messages': expect.any(String), + 'gen_ai.request.model': 'mock-model-id', + 'gen_ai.response.finish_reasons': ['stop'], + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.response.text': expect.any(String), + 'gen_ai.system': 'mock-provider', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, 'operation.name': 'ai.generateText.doGenerate', - 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'sentry.op': 'gen_ai.generate_text', + 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'vercel.ai.streaming': false, + 'vercel.ai.prompt.format': expect.any(String), 'vercel.ai.response.finishReason': 'stop', - 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.timestamp': expect.any(String), - 'vercel.ai.prompt.format': expect.any(String), - 
'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', origin: 'auto.vercelai.otel', status: 'ok', + parent_span_id: expect.any(String), + span_id: expect.any(String), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + trace_id: expect.any(String), }), // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText', - 'vercel.ai.pipeline.name': 'generateText', - 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', - 'vercel.ai.response.finishReason': 'tool-calls', - 'gen_ai.response.text': 'Tool call completed!', - 'gen_ai.response.tool_calls': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.settings.maxSteps': 1, - 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', + 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', + 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.response.text': 'Tool call completed!', + 'gen_ai.response.tool_calls': expect.any(String), 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'operation.name': 'ai.generateText', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.settings.maxSteps': 1, + 'vercel.ai.streaming': false, }, description: 'generateText', op: 'gen_ai.invoke_agent', origin: 'auto.vercelai.otel', status: 'ok', + parent_span_id: expect.any(String), + span_id: expect.any(String), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + trace_id: expect.any(String), }), // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText.doGenerate', - 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'vercel.ai.prompt.format': expect.any(String), - 'gen_ai.request.messages': expect.any(String), - 'vercel.ai.prompt.toolChoice': expect.any(String), 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, - 'vercel.ai.response.finishReason': 'tool-calls', - 'vercel.ai.response.id': expect.any(String), - 'vercel.ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'Tool call completed!', - 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.tool_calls': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.streaming': false, + 'gen_ai.request.messages': expect.any(String), 'gen_ai.request.model': 'mock-model-id', 
'gen_ai.response.finish_reasons': ['tool-calls'], 'gen_ai.response.id': expect.any(String), 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.response.text': 'Tool call completed!', + 'gen_ai.response.tool_calls': expect.any(String), 'gen_ai.system': 'mock-provider', 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, @@ -381,29 +393,50 @@ describe('Vercel AI integration', () => { 'operation.name': 'ai.generateText.doGenerate', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.prompt.format': expect.any(String), + 'vercel.ai.prompt.toolChoice': expect.any(String), + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.timestamp': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', origin: 'auto.vercelai.otel', status: 'ok', + parent_span_id: expect.any(String), + span_id: expect.any(String), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + trace_id: expect.any(String), }), // Seventh span - tool call execution span expect.objectContaining({ data: { - 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.input': expect.any(String), + 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', 'operation.name': 'ai.toolCall', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.operationId': 'ai.toolCall', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', origin: 'auto.vercelai.otel', status: 'ok', + parent_span_id: expect.any(String), + span_id: expect.any(String), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + trace_id: expect.any(String), }), ]), }; @@ -427,13 +460,7 @@ describe('Vercel AI integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText', - 'vercel.ai.pipeline.name': 'generateText', - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.settings.maxSteps': 1, - 'vercel.ai.streaming': false, + 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.model': 'mock-model-id', 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, @@ -441,6 +468,12 @@ describe('Vercel AI integration', () => { 'operation.name': 'ai.generateText', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.settings.maxSteps': 1, + 'vercel.ai.streaming': false, }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -449,16 +482,6 @@ describe('Vercel AI integration', () => { }), expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText.doGenerate', - 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'vercel.ai.response.finishReason': 'tool-calls', - 
'vercel.ai.response.id': expect.any(String), - 'vercel.ai.response.model': 'mock-model-id', - 'vercel.ai.response.timestamp': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.streaming': false, 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.finish_reasons': ['tool-calls'], 'gen_ai.response.id': expect.any(String), @@ -470,6 +493,15 @@ describe('Vercel AI integration', () => { 'operation.name': 'ai.generateText.doGenerate', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.timestamp': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -478,13 +510,13 @@ describe('Vercel AI integration', () => { }), expect.objectContaining({ data: { - 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'operation.name': 'ai.toolCall', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.operationId': 'ai.toolCall', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -548,13 +580,7 @@ describe('Vercel AI integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText', - 'vercel.ai.pipeline.name': 'generateText', - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.settings.maxSteps': 1, - 'vercel.ai.streaming': false, + 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.model': 'mock-model-id', 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, @@ -562,6 +588,12 @@ describe('Vercel AI integration', () => { 'operation.name': 'ai.generateText', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.settings.maxSteps': 1, + 'vercel.ai.streaming': false, }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -570,16 +602,6 @@ describe('Vercel AI integration', () => { }), expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText.doGenerate', - 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'vercel.ai.response.finishReason': 'tool-calls', - 'vercel.ai.response.id': expect.any(String), - 'vercel.ai.response.model': 'mock-model-id', - 'vercel.ai.response.timestamp': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - 'vercel.ai.streaming': false, 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.finish_reasons': ['tool-calls'], 'gen_ai.response.id': expect.any(String), @@ -591,6 +613,15 @@ describe('Vercel AI integration', () => { 'operation.name': 'ai.generateText.doGenerate', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 
'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.timestamp': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -599,13 +630,13 @@ describe('Vercel AI integration', () => { }), expect.objectContaining({ data: { - 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'operation.name': 'ai.toolCall', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.operationId': 'ai.toolCall', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -642,7 +673,7 @@ describe('Vercel AI integration', () => { let transactionEvent: Event | undefined; let errorEvent: Event | undefined; - const runner = await createRunner() + const runner = createRunner() .expect({ transaction: transaction => { transactionEvent = transaction; diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index f07244088ff9..8ee973197847 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -10,7 +10,12 @@ import { import { getTruncatedJsonString } from '../ai/utils'; import { toolCallSpanMap } from './constants'; import type { TokenSummary } from './types'; -import { accumulateTokensForParent, applyAccumulatedTokens, convertAvailableToolsToJsonString } from './utils'; +import { + accumulateTokensForParent, + applyAccumulatedTokens, + convertAvailableToolsToJsonString, + requestMessagesFromPrompt, +} from './utils'; import type { ProviderMetadata } from './vercel-ai-attributes'; import { AI_MODEL_ID_ATTRIBUTE, @@ -141,6 +146,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void { renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, 'gen_ai.tool.output'); renameAttributeKey(attributes, AI_SCHEMA_ATTRIBUTE, 'gen_ai.request.schema'); + renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, 'gen_ai.request.model'); addProviderMetadataToAttributes(attributes); @@ -206,6 +212,10 @@ function processGenerateSpan(span: Span, name: string, attributes: SpanAttribute if (attributes[AI_PROMPT_ATTRIBUTE]) { const truncatedPrompt = getTruncatedJsonString(attributes[AI_PROMPT_ATTRIBUTE] as string | string[]); span.setAttribute('gen_ai.prompt', truncatedPrompt); + + if (!attributes['gen_ai.request.messages'] && !attributes[AI_PROMPT_MESSAGES_ATTRIBUTE]) { + requestMessagesFromPrompt(span, attributes[AI_PROMPT_ATTRIBUTE]); + } } if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) { span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]); diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts index 9a0b57eb16f7..6be627264686 100644 --- a/packages/core/src/tracing/vercel-ai/utils.ts +++ b/packages/core/src/tracing/vercel-ai/utils.ts @@ -1,6 +1,7 @@ import type { TraceContext } from '../../types-hoist/context'; import type { Span, SpanJSON } from '../../types-hoist/span'; import { GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE } from '../ai/gen-ai-attributes'; +import { getTruncatedJsonString } from '../ai/utils'; import { toolCallSpanMap } from './constants'; import type { 
TokenSummary } from './types'; @@ -87,3 +88,39 @@ export function convertAvailableToolsToJsonString(tools: unknown[]): string { }); return JSON.stringify(toolObjects); } + +/** + * Convert the prompt string to messages array + */ +export function convertPromptToMessages(prompt: string): { role: string; content: string }[] | undefined { + try { + const p = JSON.parse(prompt); + if (!!p && typeof p === 'object') { + const { prompt, system } = p; + if (typeof prompt === 'string' || typeof system === 'string') { + const messages: { role: string; content: string }[] = []; + if (typeof system === 'string') { + messages.push({ role: 'system', content: system }); + } + if (typeof prompt === 'string') { + messages.push({ role: 'user', content: prompt }); + } + return messages.length ? messages : []; + } + } + // eslint-disable-next-line no-empty + } catch {} + return undefined; +} + +/** + * Generate a request.messages JSON array from the prompt field in the + * invoke_agent op + */ +export function requestMessagesFromPrompt(span: Span, prompt: unknown): void { + if (typeof prompt !== 'string') return; + const maybeMessages = convertPromptToMessages(prompt); + if (maybeMessages !== undefined) { + span.setAttribute('gen_ai.request.messages', getTruncatedJsonString(maybeMessages)); + } +} diff --git a/packages/core/test/lib/utils/vercelai-utils.test.ts b/packages/core/test/lib/utils/vercelai-utils.test.ts new file mode 100644 index 000000000000..d0161a39664d --- /dev/null +++ b/packages/core/test/lib/utils/vercelai-utils.test.ts @@ -0,0 +1,55 @@ +import { describe, expect, it } from 'vitest'; +import { convertPromptToMessages } from '../../../src/tracing/vercel-ai/utils'; + +describe('vercel-ai-utils', () => { + describe('convertPromptToMessages', () => { + it('should convert a prompt with system to a messages array', () => { + expect( + convertPromptToMessages( + JSON.stringify({ + system: 'You are a friendly robot', + prompt: 'Hello, robot', + }), + ), + ).toStrictEqual([ + { role: 'system', content: 'You are a friendly robot' }, + { role: 'user', content: 'Hello, robot' }, + ]); + }); + + it('should convert a system prompt to a messages array', () => { + expect( + convertPromptToMessages( + JSON.stringify({ + system: 'You are a friendly robot', + }), + ), + ).toStrictEqual([{ role: 'system', content: 'You are a friendly robot' }]); + }); + + it('should convert a user only prompt to a messages array', () => { + expect( + convertPromptToMessages( + JSON.stringify({ + prompt: 'Hello, robot', + }), + ), + ).toStrictEqual([{ role: 'user', content: 'Hello, robot' }]); + }); + + it('should ignore unexpected data', () => { + expect( + convertPromptToMessages( + JSON.stringify({ + randomField: 'Hello, robot', + nothing: 'that we know how to handle', + }), + ), + ).toBe(undefined); + }); + + it('should not break on invalid json', () => { + expect(convertPromptToMessages('this is not json')).toBe(undefined); + }); + }); +}); From eb68374f996ac18fe1ebadab8dad5f42d9b34180 Mon Sep 17 00:00:00 2001 From: isaacs Date: Mon, 1 Dec 2025 15:26:49 -0800 Subject: [PATCH 2/4] fixup! 
fix(tracing): Add missing attributes in vercel-ai spans (#18333) --- packages/core/src/tracing/vercel-ai/index.ts | 16 +++------- packages/core/src/tracing/vercel-ai/utils.ts | 33 ++++++++++++++------ 2 files changed, 28 insertions(+), 21 deletions(-) diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 8ee973197847..b21ec213ecaa 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -4,10 +4,10 @@ import type { Event } from '../../types-hoist/event'; import type { Span, SpanAttributes, SpanAttributeValue, SpanJSON, SpanOrigin } from '../../types-hoist/span'; import { spanToJSON } from '../../utils/spanUtils'; import { + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE, } from '../ai/gen-ai-attributes'; -import { getTruncatedJsonString } from '../ai/utils'; import { toolCallSpanMap } from './constants'; import type { TokenSummary } from './types'; import { @@ -20,7 +20,6 @@ import type { ProviderMetadata } from './vercel-ai-attributes'; import { AI_MODEL_ID_ATTRIBUTE, AI_MODEL_PROVIDER_ATTRIBUTE, - AI_PROMPT_ATTRIBUTE, AI_PROMPT_MESSAGES_ATTRIBUTE, AI_PROMPT_TOOLS_ATTRIBUTE, AI_RESPONSE_OBJECT_ATTRIBUTE, @@ -36,6 +35,7 @@ import { AI_USAGE_CACHED_INPUT_TOKENS_ATTRIBUTE, AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, @@ -136,7 +136,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void { } // Rename AI SDK attributes to standardized gen_ai attributes - renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages'); + renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE); renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object'); @@ -146,7 +146,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void { renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, 'gen_ai.tool.output'); renameAttributeKey(attributes, AI_SCHEMA_ATTRIBUTE, 'gen_ai.request.schema'); - renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, 'gen_ai.request.model'); + renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE); addProviderMetadataToAttributes(attributes); @@ -209,14 +209,8 @@ function processGenerateSpan(span: Span, name: string, attributes: SpanAttribute span.setAttribute('gen_ai.function_id', functionId); } - if (attributes[AI_PROMPT_ATTRIBUTE]) { - const truncatedPrompt = getTruncatedJsonString(attributes[AI_PROMPT_ATTRIBUTE] as string | string[]); - span.setAttribute('gen_ai.prompt', truncatedPrompt); + requestMessagesFromPrompt(span, attributes); - if (!attributes['gen_ai.request.messages'] && !attributes[AI_PROMPT_MESSAGES_ATTRIBUTE]) { - requestMessagesFromPrompt(span, attributes[AI_PROMPT_ATTRIBUTE]); - } - } if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) { span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]); } diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts index 6be627264686..bc390ccc1672 100644 --- 
a/packages/core/src/tracing/vercel-ai/utils.ts +++ b/packages/core/src/tracing/vercel-ai/utils.ts @@ -1,9 +1,14 @@ import type { TraceContext } from '../../types-hoist/context'; -import type { Span, SpanJSON } from '../../types-hoist/span'; -import { GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE } from '../ai/gen-ai-attributes'; +import type { Span, SpanAttributes, SpanJSON } from '../../types-hoist/span'; +import { + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} from '../ai/gen-ai-attributes'; import { getTruncatedJsonString } from '../ai/utils'; import { toolCallSpanMap } from './constants'; import type { TokenSummary } from './types'; +import { AI_PROMPT_ATTRIBUTE, AI_PROMPT_MESSAGES_ATTRIBUTE } from './vercel-ai-attributes'; /** * Accumulates token data from a span to its parent in the token accumulator map. @@ -92,7 +97,7 @@ export function convertAvailableToolsToJsonString(tools: unknown[]): string { /** * Convert the prompt string to messages array */ -export function convertPromptToMessages(prompt: string): { role: string; content: string }[] | undefined { +export function convertPromptToMessages(prompt: string): { role: string; content: string }[] { try { const p = JSON.parse(prompt); if (!!p && typeof p === 'object') { @@ -105,22 +110,30 @@ export function convertPromptToMessages(prompt: string): { role: string; content if (typeof prompt === 'string') { messages.push({ role: 'user', content: prompt }); } - return messages.length ? messages : []; + return messages; } } // eslint-disable-next-line no-empty } catch {} - return undefined; + return []; } /** * Generate a request.messages JSON array from the prompt field in the * invoke_agent op */ -export function requestMessagesFromPrompt(span: Span, prompt: unknown): void { - if (typeof prompt !== 'string') return; - const maybeMessages = convertPromptToMessages(prompt); - if (maybeMessages !== undefined) { - span.setAttribute('gen_ai.request.messages', getTruncatedJsonString(maybeMessages)); +export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes): void { + if (attributes[AI_PROMPT_ATTRIBUTE]) { + const truncatedPrompt = getTruncatedJsonString(attributes[AI_PROMPT_ATTRIBUTE] as string | string[]); + span.setAttribute('gen_ai.prompt', truncatedPrompt); + } + const prompt = attributes[AI_PROMPT_ATTRIBUTE]; + if ( + typeof prompt === 'string' && + !attributes[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE] && + !attributes[AI_PROMPT_MESSAGES_ATTRIBUTE] + ) { + const messages = convertPromptToMessages(prompt); + if (messages.length) span.setAttribute(GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, getTruncatedJsonString(messages)); } } From 346d3b37e2f29dc1f44daa7d88748c42dc11a973 Mon Sep 17 00:00:00 2001 From: isaacs Date: Mon, 1 Dec 2025 15:50:30 -0800 Subject: [PATCH 3/4] fixup! fixup! 
fix(tracing): Add missing attributes in vercel-ai spans (#18333) --- .../suites/tracing/vercelai/v5/test.ts | 40 +++++++++---------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 56860af76925..02aeda69c218 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -13,7 +13,7 @@ describe('Vercel AI integration (V5)', () => { // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -41,10 +41,9 @@ describe('Vercel AI integration (V5)', () => { 'operation.name': 'ai.generateText.doGenerate', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, 'gen_ai.system': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', @@ -66,7 +65,7 @@ describe('Vercel AI integration (V5)', () => { // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -76,6 +75,7 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', + 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', 'gen_ai.response.model': 'mock-model-id', 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, @@ -97,10 +97,9 @@ describe('Vercel AI integration (V5)', () => { 'operation.name': 'ai.generateText.doGenerate', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, 'gen_ai.system': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', @@ -124,7 +123,7 @@ describe('Vercel AI integration (V5)', () => { // Fifth span - tool call generateText span expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -147,7 +146,7 @@ describe('Vercel AI integration (V5)', () => { // Sixth span - tool call doGenerate span expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 
'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -157,7 +156,6 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.finish_reasons': ['tool-calls'], 'gen_ai.response.id': expect.any(String), 'gen_ai.response.model': 'mock-model-id', @@ -202,11 +200,12 @@ describe('Vercel AI integration (V5)', () => { // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', + 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', 'gen_ai.response.text': 'First span here!', 'vercel.ai.settings.maxRetries': 2, @@ -228,7 +227,7 @@ describe('Vercel AI integration (V5)', () => { // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -240,7 +239,6 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.finish_reasons': ['stop'], 'gen_ai.response.id': expect.any(String), 'gen_ai.response.model': 'mock-model-id', @@ -260,11 +258,12 @@ describe('Vercel AI integration (V5)', () => { // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', + 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', 'gen_ai.response.text': expect.any(String), 'vercel.ai.settings.maxRetries': 2, @@ -291,10 +290,9 @@ describe('Vercel AI integration (V5)', () => { 'operation.name': 'ai.generateText.doGenerate', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, 'gen_ai.system': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', @@ -318,13 +316,13 @@ describe('Vercel AI integration (V5)', () => { // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 
'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', + 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', - // 'gen_ai.response.text': 'Tool call completed!', 'gen_ai.response.tool_calls': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -345,7 +343,7 @@ describe('Vercel AI integration (V5)', () => { // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -360,7 +358,6 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.response.tool_calls': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.finish_reasons': ['tool-calls'], 'gen_ai.response.id': expect.any(String), 'gen_ai.response.model': 'mock-model-id', @@ -441,7 +438,7 @@ describe('Vercel AI integration (V5)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -462,7 +459,7 @@ describe('Vercel AI integration (V5)', () => { }), expect.objectContaining({ data: { - 'vercel.ai.model.id': 'mock-model-id', + 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -472,7 +469,6 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.request.model': 'mock-model-id', 'gen_ai.response.finish_reasons': ['tool-calls'], 'gen_ai.response.id': expect.any(String), 'gen_ai.response.model': 'mock-model-id', From 71b949bedc98a4b00b44e5970ea5d44f580ab988 Mon Sep 17 00:00:00 2001 From: isaacs Date: Mon, 1 Dec 2025 16:05:18 -0800 Subject: [PATCH 4/4] fixup! fixup! fixup! fix(tracing): Add missing attributes in vercel-ai spans (#18333) --- packages/core/test/lib/utils/vercelai-utils.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/core/test/lib/utils/vercelai-utils.test.ts b/packages/core/test/lib/utils/vercelai-utils.test.ts index d0161a39664d..be329e6f5970 100644 --- a/packages/core/test/lib/utils/vercelai-utils.test.ts +++ b/packages/core/test/lib/utils/vercelai-utils.test.ts @@ -45,11 +45,11 @@ describe('vercel-ai-utils', () => { nothing: 'that we know how to handle', }), ), - ).toBe(undefined); + ).toStrictEqual([]); }); it('should not break on invalid json', () => { - expect(convertPromptToMessages('this is not json')).toBe(undefined); + expect(convertPromptToMessages('this is not json')).toStrictEqual([]); }); }); });
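
Note: the conversion this series introduces can be read in isolation. The sketch below mirrors how convertPromptToMessages behaves after PATCH 2/4 (unrecognized and invalid prompts yield an empty array rather than undefined). It is illustrative only: the Message alias, the userPrompt rename, and the inline usage examples are not part of the patched packages, and the real implementation additionally truncates the serialized result with getTruncatedJsonString before writing the gen_ai.request.messages attribute.

type Message = { role: string; content: string };

function convertPromptToMessages(prompt: string): Message[] {
  try {
    const parsed: unknown = JSON.parse(prompt);
    if (!!parsed && typeof parsed === 'object') {
      const { prompt: userPrompt, system } = parsed as { prompt?: unknown; system?: unknown };
      const messages: Message[] = [];
      // A string `system` field becomes a system message, and a string
      // `prompt` field becomes a user message, in that order.
      if (typeof system === 'string') {
        messages.push({ role: 'system', content: system });
      }
      if (typeof userPrompt === 'string') {
        messages.push({ role: 'user', content: userPrompt });
      }
      return messages;
    }
  } catch {
    // Invalid JSON falls through to the empty result below.
  }
  return [];
}

// Matches the expectations in vercelai-utils.test.ts:
convertPromptToMessages('{"system":"You are a friendly robot","prompt":"Hello, robot"}');
// -> [{ role: 'system', content: 'You are a friendly robot' },
//     { role: 'user', content: 'Hello, robot' }]
convertPromptToMessages('{"randomField":"Hello, robot"}'); // -> []
convertPromptToMessages('this is not json'); // -> []

Returning [] instead of undefined (the PATCH 2/4 change) lets the caller treat "no messages" and "unparseable prompt" uniformly, setting gen_ai.request.messages only when the resulting array is non-empty.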