@@ -0,0 +1,9 @@
import * as Sentry from '@sentry/browser';

window.Sentry = Sentry;

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  tracesSampleRate: 1,
  debug: true,
});
@@ -0,0 +1,55 @@
// Minimal stand-in for the Vercel AI SDK's LanguageModelV1 interface;
// only doGenerate is needed for these tests.
class MockLanguageModelV1 {
  constructor(config) {
    this.doGenerate = config.doGenerate;
  }
}

export const mockModelBasic = new MockLanguageModelV1({
  doGenerate: async () => ({
    rawCall: { rawPrompt: null, rawSettings: {} },
    finishReason: 'stop',
    usage: { promptTokens: 10, completionTokens: 20 },
    text: 'Mock response from model',
  }),
});

// Mock implementation of generateText that uses the mock models and mirrors
// the AI SDK's span structure: an outer ai.generateText span wrapping an
// ai.generateText.doGenerate span.
export async function mockGenerateText(options) {
  const model = options.model;

  return await window.Sentry.startSpan(
    {
      name: 'ai.generateText',
      attributes: {
        'ai.model.id': 'gpt-4-turbo',
        'ai.model.provider': 'openai',
      },
    },
    async () => {
      const result = await model.doGenerate();

      return await window.Sentry.startSpan(
        {
          name: 'ai.generateText.doGenerate',
          attributes: {
            'ai.model.id': 'gpt-4-turbo',
            'ai.model.provider': 'openai',
            'ai.prompt': options.prompt,
            'ai.response.text': result.text,
            'ai.usage.promptTokens': result.usage.promptTokens,
            'ai.usage.completionTokens': result.usage.completionTokens,
          },
        },
        async () => {
          // Simulate processing time
          await new Promise(resolve => setTimeout(resolve, 10));

          return {
            text: result.text,
            usage: result.usage,
          };
        },
      );
    },
  );
}
@@ -0,0 +1,12 @@
import { addVercelAiProcessors, getClient } from '@sentry/browser';
import { mockGenerateText, mockModelBasic } from './mocks.js';

const client = getClient();
addVercelAiProcessors(client);

const result = await mockGenerateText({
  model: mockModelBasic,
  prompt: 'Test prompt',
});

console.log('Generated text result:', result);
@@ -0,0 +1,29 @@
import { expect } from '@playwright/test';
import { sentryTest } from '../../../../utils/fixtures';
import { envelopeRequestParser, waitForTransactionRequest } from '../../../../utils/helpers';

// These tests are not exhaustive because the instrumentation is
// already tested in the node integration tests and we merely
// want to test that the instrumentation does not crash in the browser
// and that gen_ai transactions are sent.

sentryTest('manual Vercel AI instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => {
Comment on lines +5 to +10

@Lms24 (Member) commented on Nov 27, 2025:

m: I don't have a super strong opinion here, but given that node and browser do have their differences, I'd prefer somewhat stricter tests where we test against more data. That helps us find issues as well. I'll also defer to @RulaKhaled for the final call. Whatever you think makes the most sense :)

Update: I bumped this to logaf-M: Can we check against some data that should be added by our instrumentation? The attribute we check against is from the manually started span, no?

  const transactionPromise = waitForTransactionRequest(page, event => {
    return !!event.transaction?.includes('generateText');
  });

  const url = await getLocalTestUrl({ testDir: __dirname });
  await page.goto(url);

  const req = await transactionPromise;

  const eventData = envelopeRequestParser(req);

  // Verify it's a gen_ai transaction
  expect(eventData.transaction).toBe('generateText');
  expect(eventData.contexts?.trace?.op).toBe('gen_ai.invoke_agent');
  expect(eventData.contexts?.trace?.origin).toBe('auto.vercelai.otel');
  expect(eventData.contexts?.trace?.data).toMatchObject({
    'gen_ai.response.model': 'gpt-4-turbo',
  });
});
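
Following up on the review note above, a stricter variant could assert data that the processors themselves add, rather than attributes set on the manually started span. A minimal sketch; the span op and gen_ai.* attribute names below are assumptions based on the node-side instrumentation and are not verified against this browser setup:

// Hypothetical stricter check: assert usage data that only the Vercel AI
// processors would have mapped from the mock's ai.usage.* attributes.
// Both the op and the attribute names here are assumptions.
const doGenerateSpan = eventData.spans?.find(span => span.op === 'gen_ai.generate_text');
expect(doGenerateSpan?.data).toMatchObject({
  'gen_ai.usage.input_tokens': 10,
  'gen_ai.usage.output_tokens': 20,
});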
@@ -41,6 +41,7 @@ const IMPORTED_INTEGRATION_CDN_BUNDLE_PATHS: Record<string, string> = {
  instrumentOpenAiClient: 'instrumentopenaiclient',
  instrumentGoogleGenAIClient: 'instrumentgooglegenaiclient',
  instrumentLangGraph: 'instrumentlanggraph',
  addVercelAiProcessors: 'addvercelaiprocessors',
  createLangChainCallbackHandler: 'createlangchaincallbackhandler',
  // technically, this is not an integration, but let's add it anyway for simplicity
  makeMultiplexedTransport: 'multiplexedtransport',
1 change: 1 addition & 0 deletions packages/browser/rollup.bundle.config.mjs
@@ -17,6 +17,7 @@ const reexportedPluggableIntegrationFiles = [
  'instrumentopenaiclient',
  'instrumentgooglegenaiclient',
  'instrumentlanggraph',
  'addvercelaiprocessors',
  'createlangchaincallbackhandler',
];

1 change: 1 addition & 0 deletions packages/browser/src/index.ts
@@ -67,6 +67,7 @@ export {
  instrumentOpenAiClient,
  instrumentGoogleGenAIClient,
  instrumentLangGraph,
  addVercelAiProcessors,
  createLangChainCallbackHandler,
  logger,
} from '@sentry/core';
@@ -0,0 +1 @@
export { addVercelAiProcessors } from '@sentry/core';
1 change: 1 addition & 0 deletions packages/browser/src/utils/lazyLoadIntegration.ts
@@ -25,6 +25,7 @@ const LazyLoadableIntegrations = {
  instrumentOpenAiClient: 'instrumentopenaiclient',
  instrumentGoogleGenAIClient: 'instrumentgooglegenaiclient',
  instrumentLangGraph: 'instrumentlanggraph',
  addVercelAiProcessors: 'addvercelaiprocessors',
  createLangChainCallbackHandler: 'createlangchaincallbackhandler',
} as const;
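
For reference, this map entry also makes the helper reachable through the lazy-loading path. A minimal usage sketch, assuming the standard lazyLoadIntegration flow (fetch the matching CDN bundle on demand, then return the export):

import { getClient, lazyLoadIntegration } from '@sentry/browser';

// Assumed behavior: resolves the 'addvercelaiprocessors' CDN bundle and
// returns the exported function, which is then applied to the active client
// just like the static import in the test subject above.
const addVercelAiProcessors = await lazyLoadIntegration('addVercelAiProcessors');
addVercelAiProcessors(getClient());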
