Skip to content

Commit 8a18b0d

Browse files
committed
feat(node): Add LangChain v1 support
1 parent cbecbdf commit 8a18b0d

File tree

11 files changed

+1021
-13
lines changed

11 files changed

+1021
-13
lines changed
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Instrumentation setup for the LangChain integration tests (PII enabled).
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out mock express server transactions.
    // `transaction` is optional on transaction events, so guard with
    // optional chaining instead of dereferencing it unconditionally.
    if (event.transaction?.includes('/v1/messages') || event.transaction?.includes('/v1/chat/completions')) {
      return null;
    }
    return event;
  },
});
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Instrumentation setup for the LangChain integration tests (PII disabled).
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out mock express server transactions.
    // `transaction` is optional on transaction events, so guard with
    // optional chaining instead of dereferencing it unconditionally.
    if (event.transaction?.includes('/v1/messages') || event.transaction?.includes('/v1/chat/completions')) {
      return null;
    }
    return event;
  },
});
Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
import * as Sentry from '@sentry/node';
2+
import express from 'express';
3+
import { initChatModel } from 'langchain';
4+
5+
/**
 * Starts a mock OpenAI-compatible HTTP server on a random free port.
 *
 * POST /v1/chat/completions replies with a canned `chat.completion` payload
 * echoing the requested model, or a 404 "model not found" error when the
 * model is 'error-model'.
 *
 * @returns {Promise<import('http').Server>} resolves once the server is listening
 */
function startMockOpenAIServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/chat/completions', (req, res) => {
    const { model } = req.body;

    if (model === 'error-model') {
      const errorBody = {
        error: {
          message: 'Model not found',
          type: 'invalid_request_error',
          param: null,
          code: 'model_not_found',
        },
      };
      res.status(404).json(errorBody);
      return;
    }

    // Simulate OpenAI response
    const completion = {
      id: 'chatcmpl-init-test-123',
      object: 'chat.completion',
      created: 1677652288,
      model,
      system_fingerprint: 'fp_44709d6fcb',
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Hello from initChatModel!',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 8,
        completion_tokens: 12,
        total_tokens: 20,
      },
    };
    res.json(completion);
  });

  return new Promise(resolve => {
    const httpServer = app.listen(0, () => resolve(httpServer));
  });
}
55+
56+
/**
 * Exercises the LangChain v1 unified `initChatModel` API against a local mock
 * OpenAI server so the Sentry LangChain integration records the resulting spans:
 * two successful invocations with different models and one error path.
 */
async function run() {
  const server = await startMockOpenAIServer();
  const baseUrl = `http://localhost:${server.address().port}/v1`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      // Set OpenAI API key in environment
      process.env.OPENAI_API_KEY = 'mock-api-key';

      // Test 1: Initialize chat model using unified API with model string
      const model1 = await initChatModel('gpt-4o', {
        temperature: 0.7,
        maxTokens: 100,
        modelProvider: 'openai',
        configurableFields: ['model'],
        configuration: {
          baseURL: baseUrl,
        },
      });

      await model1.invoke('Tell me about LangChain');

      // Test 2: Initialize with different model
      const model2 = await initChatModel('gpt-3.5-turbo', {
        temperature: 0.5,
        modelProvider: 'openai',
        configuration: {
          baseURL: baseUrl,
        },
      });

      await model2.invoke([
        { role: 'system', content: 'You are a helpful assistant' },
        { role: 'user', content: 'What is AI?' },
      ]);

      // Test 3: Error handling
      try {
        const errorModel = await initChatModel('error-model', {
          modelProvider: 'openai',
          configuration: {
            baseURL: baseUrl,
          },
        });
        await errorModel.invoke('This will fail');
      } catch {
        // Expected error
      }
    });

    await Sentry.flush(2000);
  } finally {
    // Always shut the mock server down — even if a model call throws —
    // so the open listener cannot keep the Node process alive.
    server.close();
  }
}

run().catch(error => {
  // Previously `run()` was a floating promise; a rejection would be silently
  // lost (or crash, depending on Node version). Surface it and fail the run.
  // eslint-disable-next-line no-console
  console.error(error);
  process.exitCode = 1;
});
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
import { ChatAnthropic } from '@langchain/anthropic';
2+
import * as Sentry from '@sentry/node';
3+
import express from 'express';
4+
5+
/**
 * Starts a mock Anthropic-compatible HTTP server on a random free port.
 *
 * POST /v1/messages answers every request with a fixed assistant message,
 * echoing back the model name sent by the client.
 *
 * @returns {Promise<import('http').Server>} resolves once the server is listening
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    const requestedModel = req.body.model;

    const reply = {
      id: 'msg_truncation_test',
      type: 'message',
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Response to truncated messages',
        },
      ],
      model: requestedModel,
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    };

    res.json(reply);
  });

  return new Promise(resolve => {
    const listener = app.listen(0, () => resolve(listener));
  });
}
38+
39+
/**
 * Sends oversized prompts through ChatAnthropic against a local mock server so
 * the Sentry LangChain integration's message-truncation behavior can be
 * asserted on the captured spans.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseUrl = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const model = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        apiKey: 'mock-api-key',
        clientOptions: {
          baseURL: baseUrl,
        },
      });

      const largeContent1 = 'A'.repeat(15000); // ~15KB
      const largeContent2 = 'B'.repeat(15000); // ~15KB
      const largeContent3 = 'C'.repeat(25000); // ~25KB (will be truncated)

      // Create one very large string that gets truncated to only include Cs
      await model.invoke(largeContent3 + largeContent2);

      // Create an array of messages that gets truncated to only include the last message (result should again contain only Cs)
      await model.invoke([
        { role: 'system', content: largeContent1 },
        { role: 'user', content: largeContent2 },
        { role: 'user', content: largeContent3 },
      ]);
    });

    await Sentry.flush(2000);
  } finally {
    // Always shut the mock server down — even if an invoke throws —
    // so the open listener cannot keep the Node process alive.
    server.close();
  }
}

run().catch(error => {
  // Previously `run()` was a floating promise; a rejection would be silently
  // lost (or crash, depending on Node version). Surface it and fail the run.
  // eslint-disable-next-line no-console
  console.error(error);
  process.exitCode = 1;
});
Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
import * as Sentry from '@sentry/node';
2+
import express from 'express';
3+
4+
/**
 * Starts a mock Anthropic-compatible HTTP server on a random free port.
 *
 * POST /v1/messages answers every request with a fixed assistant message,
 * echoing back the model name from the request body.
 *
 * @returns {Promise<import('http').Server>} resolves once the server is listening
 */
function startMockAnthropicServer() {
  const app = express();
  app.use(express.json());

  app.post('/v1/messages', (req, res) => {
    const reply = {
      id: 'msg_test123',
      type: 'message',
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Mock response from Anthropic!',
        },
      ],
      model: req.body.model,
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    };

    res.json(reply);
  });

  return new Promise(resolve => {
    const listener = app.listen(0, () => resolve(listener));
  });
}
35+
36+
/**
 * Reproduces an instrumentation-timing edge case: an Anthropic client created
 * BEFORE LangChain is imported keeps its direct instrumentation, a LangChain
 * model is instrumented by the LangChain integration, and an Anthropic client
 * created AFTER LangChain is imported should be skipped by the Anthropic
 * integration.
 */
async function run() {
  const server = await startMockAnthropicServer();
  const baseURL = `http://localhost:${server.address().port}`;

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      // EDGE CASE: Import and instantiate Anthropic client BEFORE LangChain is imported
      // This simulates the timing issue where a user creates an Anthropic client in one file
      // before importing LangChain in another file
      const { default: Anthropic } = await import('@anthropic-ai/sdk');
      const anthropicClient = new Anthropic({
        apiKey: 'mock-api-key',
        baseURL,
      });

      // Use the Anthropic client directly - this will be instrumented by the Anthropic integration
      await anthropicClient.messages.create({
        model: 'claude-3-5-sonnet-20241022',
        messages: [{ role: 'user', content: 'Direct Anthropic call' }],
        temperature: 0.7,
        max_tokens: 100,
      });

      // NOW import LangChain - at this point it will mark Anthropic to be skipped
      // But the client created above is already instrumented
      const { ChatAnthropic } = await import('@langchain/anthropic');

      // Create a LangChain model - this uses Anthropic under the hood
      const langchainModel = new ChatAnthropic({
        model: 'claude-3-5-sonnet-20241022',
        temperature: 0.7,
        maxTokens: 100,
        apiKey: 'mock-api-key',
        clientOptions: {
          baseURL,
        },
      });

      // Use LangChain - this will be instrumented by LangChain integration
      await langchainModel.invoke('LangChain Anthropic call');

      // Create ANOTHER Anthropic client after LangChain was imported
      // This one should NOT be instrumented (skip mechanism works correctly)
      const anthropicClient2 = new Anthropic({
        apiKey: 'mock-api-key',
        baseURL,
      });

      await anthropicClient2.messages.create({
        model: 'claude-3-5-sonnet-20241022',
        messages: [{ role: 'user', content: 'Second direct Anthropic call' }],
        temperature: 0.7,
        max_tokens: 100,
      });
    });

    await Sentry.flush(2000);
  } finally {
    // Always shut the mock server down — even if a call throws —
    // so the open listener cannot keep the Node process alive.
    server.close();
  }
}

run().catch(error => {
  // Previously `run()` was a floating promise; a rejection would be silently
  // lost (or crash, depending on Node version). Surface it and fail the run.
  // eslint-disable-next-line no-console
  console.error(error);
  process.exitCode = 1;
});

0 commit comments

Comments
 (0)