diff --git a/examples/client/.env.example b/examples/client/.env.example
new file mode 100644
index 000000000..e49986577
--- /dev/null
+++ b/examples/client/.env.example
@@ -0,0 +1,3 @@
+# LLM API Key
+# Get your API key from your LLM provider (OpenAI, Groq, etc.)
+LLM_API_KEY=your_api_key_here
diff --git a/examples/client/README.md b/examples/client/README.md
index 12a2b0d68..ae178ed8d 100644
--- a/examples/client/README.md
+++ b/examples/client/README.md
@@ -26,6 +26,7 @@ Most clients expect a server to be running. Start one from [`../server/README.md
 
 | Scenario | Description | File |
 | --------------------------------------------------- | ----------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ |
+| Simple multi-server chatbot | CLI chatbot connecting to multiple MCP servers via STDIO with LLM integration. | [`src/simpleChatbot.ts`](src/simpleChatbot.ts) |
 | Interactive Streamable HTTP client | CLI client that exercises tools/resources/prompts, notifications, elicitation, and tasks. | [`src/simpleStreamableHttp.ts`](src/simpleStreamableHttp.ts) |
 | Backwards-compatible client (Streamable HTTP → SSE) | Tries Streamable HTTP first, falls back to legacy SSE on 4xx responses. | [`src/streamableHttpWithSseFallbackClient.ts`](src/streamableHttpWithSseFallbackClient.ts) |
 | SSE polling client (legacy) | Polls a legacy HTTP+SSE server and demonstrates notification handling. | [`src/ssePollingClient.ts`](src/ssePollingClient.ts) |
@@ -37,6 +38,41 @@ Most clients expect a server to be running. Start one from [`../server/README.md
 | URL elicitation client | Drives URL-mode elicitation flows (sensitive input in a browser). | [`src/elicitationUrlExample.ts`](src/elicitationUrlExample.ts) |
 | Task interactive client | Demonstrates task-based execution + interactive server→client requests. | [`src/simpleTaskInteractiveClient.ts`](src/simpleTaskInteractiveClient.ts) |
 
+## Simple chatbot example
+
+The simple chatbot demonstrates connecting to multiple MCP servers simultaneously and integrating with an LLM provider.
+
+**Configuration:**
+
+A `servers_config.json` file is included with default server configurations. You can edit it to add or modify servers:
+
+```json
+{
+    "mcpServers": {
+        "everything": {
+            "command": "npx",
+            "args": ["-y", "@modelcontextprotocol/server-everything"]
+        },
+        "memory": {
+            "command": "npx",
+            "args": ["-y", "@modelcontextprotocol/server-memory"]
+        }
+    }
+}
+```
+
+The chatbot will discover tools from all configured servers and allow interactive conversation. Type `quit` or `exit` to end the session.
+
+**Running:**
+
+```bash
+# Set your LLM API key (OpenAI, Groq, etc.)
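+# Note: the chatbot's SimpleLLMClient defaults to Groq's OpenAI-compatible endpoint and the
+# llama-3.3-70b-versatile model; pass a different endpoint/model to its constructor for other providers.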
+export LLM_API_KEY=your_api_key_here
+
+# Run the chatbot
+pnpm --filter @modelcontextprotocol/examples-client exec tsx src/simpleChatbot.ts
+```
+
 ## URL elicitation example (server + client)
 
 Run the server first:
diff --git a/examples/client/package.json b/examples/client/package.json
index d77d6faf2..89934c345 100644
--- a/examples/client/package.json
+++ b/examples/client/package.json
@@ -43,6 +43,7 @@
         "@modelcontextprotocol/tsconfig": "workspace:^",
         "@modelcontextprotocol/eslint-config": "workspace:^",
         "@modelcontextprotocol/vitest-config": "workspace:^",
-        "tsdown": "catalog:devTools"
+        "tsdown": "catalog:devTools",
+        "vitest": "catalog:devTools"
     }
 }
diff --git a/examples/client/servers_config.json b/examples/client/servers_config.json
new file mode 100644
index 000000000..3c0c5d2f9
--- /dev/null
+++ b/examples/client/servers_config.json
@@ -0,0 +1,12 @@
+{
+    "mcpServers": {
+        "everything": {
+            "command": "npx",
+            "args": ["-y", "@modelcontextprotocol/server-everything"]
+        },
+        "memory": {
+            "command": "npx",
+            "args": ["-y", "@modelcontextprotocol/server-memory"]
+        }
+    }
+}
diff --git a/examples/client/src/simpleChatbot.ts b/examples/client/src/simpleChatbot.ts
new file mode 100644
index 000000000..9544a1ce0
--- /dev/null
+++ b/examples/client/src/simpleChatbot.ts
@@ -0,0 +1,389 @@
+import { readFile } from 'node:fs/promises';
+import type { Interface as ReadlineInterface } from 'node:readline/promises';
+import { createInterface } from 'node:readline/promises';
+
+import type { Tool } from '@modelcontextprotocol/client';
+import { Client, StdioClientTransport } from '@modelcontextprotocol/client';
+
+interface ServerConfig {
+    command: string;
+    args?: string[];
+    env?: Record<string, string>;
+}
+
+interface Config {
+    mcpServers: Record<string, ServerConfig>;
+}
+
+export interface ChatMessage {
+    role: 'system' | 'user' | 'assistant';
+    content: string;
+}
+
+export interface LLMClient {
+    getResponse(messages: ChatMessage[]): Promise<string>;
+}
+
+/**
+ * Load configuration from a JSON file
+ */
+export async function loadConfig(path: string): Promise<Config> {
+    const content = await readFile(path, 'utf-8');
+    const config = JSON.parse(content) as Config;
+
+    // Validate required fields
+    if (!config.mcpServers) {
+        throw new Error('Config missing required field: mcpServers');
+    }
+
+    return config;
+}
+
+/**
+ * Connect to a single MCP server via STDIO
+ */
+export async function connectToServer(name: string, config: ServerConfig): Promise<Client> {
+    const transport = new StdioClientTransport({
+        command: config.command,
+        args: config.args,
+        env: config.env
+    });
+
+    const client = new Client({
+        name: `chatbot-client-${name}`,
+        version: '1.0.0'
+    });
+
+    await client.connect(transport);
+    return client;
+}
+
+/**
+ * Connect to all MCP servers from config in parallel
+ */
+export async function connectToAllServers(config: Config): Promise<Map<string, Client>> {
+    const entries = Object.entries(config.mcpServers);
+
+    const clients = await Promise.all(entries.map(([name, serverConfig]) => connectToServer(name, serverConfig)));
+
+    const clientMap = new Map<string, Client>();
+    entries.forEach(([name], index) => {
+        clientMap.set(name, clients[index]!);
+    });
+
+    return clientMap;
+}
+
+/**
+ * ChatSession orchestrates the interaction between user, LLM, and MCP servers.
+ * Handles tool discovery, execution, and maintains conversation state.
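+ *
+ * The LLM requests a tool by replying with a bare JSON object of the form
+ * `{ "tool": "<tool-name>", "arguments": { ... } }`; see buildSystemPrompt() and
+ * parseToolCallRequest() below for how that contract is prompted for and detected.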
+ */
+export class ChatSession {
+    public readonly clients: Map<string, Client>;
+    public readonly llmClient: LLMClient;
+    public messages: ChatMessage[] = [];
+
+    constructor(clients: Map<string, Client>, llmClient: LLMClient) {
+        this.clients = clients;
+        this.llmClient = llmClient;
+    }
+
+    /**
+     * Get all available tools from all connected servers
+     */
+    async getAvailableTools(): Promise<Array<Tool & { serverName: string }>> {
+        const allTools: Array<Tool & { serverName: string }> = [];
+
+        for (const [serverName, client] of this.clients.entries()) {
+            const response = await client.listTools();
+            for (const tool of response.tools) {
+                allTools.push({ ...tool, serverName });
+            }
+        }
+
+        return allTools;
+    }
+
+    /**
+     * Parse LLM response for tool call requests; returns null if no tool call is requested
+     */
+    private parseToolCallRequest(llmResponse: string): { tool: string; arguments: unknown } | null {
+        try {
+            const parsed = JSON.parse(llmResponse);
+            if (parsed && typeof parsed === 'object' && 'tool' in parsed && 'arguments' in parsed) {
+                return parsed as { tool: string; arguments: unknown };
+            }
+            return null;
+        } catch {
+            return null;
+        }
+    }
+
+    /**
+     * Process LLM response and execute tool if needed
+     */
+    async processLlmResponse(llmResponse: string): Promise<string> {
+        const parsedToolCall = this.parseToolCallRequest(llmResponse);
+        if (parsedToolCall === null) {
+            return llmResponse;
+        }
+
+        console.info(`Executing tool: ${parsedToolCall.tool}`);
+        console.info(`With arguments: ${JSON.stringify(parsedToolCall.arguments)}`);
+
+        // Find which server has this tool
+        for (const client of this.clients.values()) {
+            const tools = await client.listTools();
+            const hasTool = tools.tools.some(t => t.name === parsedToolCall.tool);
+
+            if (hasTool) {
+                try {
+                    const result = await client.callTool({
+                        name: parsedToolCall.tool,
+                        arguments: parsedToolCall.arguments as Record<string, unknown>
+                    });
+
+                    return `Tool execution result: ${JSON.stringify(result)}`;
+                } catch (e) {
+                    const errorMsg = `Error executing tool: ${(e as Error).message}`;
+                    console.error(errorMsg);
+                    return errorMsg;
+                }
+            }
+        }
+
+        return `No server found with tool: ${parsedToolCall.tool}`;
+    }
+
+    /**
+     * Build system prompt with available tools
+     */
+    private async buildSystemPrompt(): Promise<string> {
+        const tools = await this.getAvailableTools();
+        const toolDescriptions = tools
+            .map(tool => {
+                let desc = `Tool: ${tool.name}\n`;
+                desc += `Description: ${tool.description || 'No description'}\n`;
+                desc += 'Arguments:\n';
+                if (tool.inputSchema && typeof tool.inputSchema === 'object' && 'properties' in tool.inputSchema) {
+                    const schema = tool.inputSchema as { properties?: Record<string, unknown>; required?: string[] };
+                    const props = schema.properties || {};
+                    const argsList: string[] = [];
+                    for (const [paramName, paramInfo] of Object.entries(props)) {
+                        const info = paramInfo as { description?: string };
+                        let argDesc = `- ${paramName}: ${info.description || 'No description'}`;
+                        if (schema.required?.includes(paramName)) {
+                            argDesc += ' (required)';
+                        }
+                        argsList.push(argDesc);
+                    }
+                    desc += argsList.join('\n');
+                }
+                return desc;
+            })
+            .join('\n');
+
+        const prompt = [
+            'You are a helpful assistant with access to these tools:',
+            '',
+            toolDescriptions,
+            '',
+            "Choose the appropriate tool based on the user's question. If no tool is needed, reply directly.",
+            '',
+            'IMPORTANT: When you need to use a tool, you must ONLY respond with the exact JSON object format below, nothing else:',
+            '{',
+            '  "tool": "tool-name",',
+            '  "arguments": {',
+            '    "argument-name": "value"',
+            '  }',
+            '}',
+            '',
+            "After receiving a tool's response:",
+            '1. Transform the raw data into a natural, conversational response',
+            '2. Keep responses concise but informative',
+            '3. Focus on the most relevant information',
+            "4. Use appropriate context from the user's question",
+            '5. Avoid simply repeating the raw data',
+            '',
+            'Please use only the tools that are explicitly defined above.'
+        ].join('\n');
+
+        return prompt;
+    }
+
+    /**
+     * Clean up all server connections
+     */
+    async cleanup(): Promise<void> {
+        for (const [serverName, client] of this.clients.entries()) {
+            if (!client || !client.transport) continue;
+            try {
+                await client.transport.close();
+            } catch (e) {
+                const message = e instanceof Error ? e.message : String(e);
+                console.warn(`Warning during cleanup of server ${serverName}: ${message}`);
+            }
+        }
+    }
+
+    /**
+     * Start interactive chat session
+     * @param readlineInterface Optional readline interface for testing
+     */
+    async start(readlineInterface?: ReadlineInterface): Promise<void> {
+        const rl =
+            readlineInterface ??
+            createInterface({
+                input: process.stdin,
+                output: process.stdout
+            });
+
+        // Handle Ctrl+C
+        const handleSigInt = async () => {
+            console.log('\n\nExiting...');
+            rl.close();
+            await this.cleanup();
+            process.exit(0);
+        };
+
+        process.on('SIGINT', handleSigInt);
+
+        try {
+            // Initialize system message
+            const systemMessage = await this.buildSystemPrompt();
+            this.messages = [{ role: 'system', content: systemMessage }];
+
+            console.log('Chat session started. Type "exit" or "quit" to end.\n');
+
+            // Chat loop
+            while (true) {
+                let userInput: string;
+                try {
+                    userInput = (await rl.question('You: ')).trim();
+                } catch (err) {
+                    // Handle Ctrl+C gracefully (readline throws AbortError)
+                    if (err instanceof Error && (err.message.includes('Ctrl+C') || err.name === 'AbortError')) {
+                        break;
+                    }
+                    console.error('Error reading input:', err);
+                    break;
+                }
+
+                if (userInput.toLowerCase() === 'quit' || userInput.toLowerCase() === 'exit') {
+                    console.log('\nExiting...');
+                    break;
+                }
+
+                this.messages.push({ role: 'user', content: userInput });
+
+                const llmResponse = await this.llmClient.getResponse(this.messages);
+
+                const result = await this.processLlmResponse(llmResponse);
+
+                if (result !== llmResponse) {
+                    // Tool was executed, add both LLM response and tool result
+                    this.messages.push({ role: 'assistant', content: llmResponse });
+                    this.messages.push({ role: 'system', content: result });
+
+                    // Get final response from LLM
+                    const finalResponse = await this.llmClient.getResponse(this.messages);
+                    console.log(`\nAssistant: ${finalResponse}`);
+                    this.messages.push({ role: 'assistant', content: finalResponse });
+                } else {
+                    console.log(`\nAssistant: ${llmResponse}`);
+                    this.messages.push({ role: 'assistant', content: llmResponse });
+                }
+            }
+        } catch (e) {
+            console.error('Error during chat session:', e);
+        } finally {
+            process.off('SIGINT', handleSigInt);
+            rl.close();
+            await this.cleanup();
+        }
+    }
+
+    /**
+     * Get current message history
+     */
+    getMessages(): ChatMessage[] {
+        return [...this.messages];
+    }
+}
+
+/**
+ * Simple LLM client using OpenAI-compatible API
+ * Compatible with OpenAI, Groq, and other providers following the OpenAI API format
+ */
+export class SimpleLLMClient implements LLMClient {
+    private readonly apiKey: string;
+    private readonly endpoint: string;
+    private readonly model: string;
+
+    constructor(apiKey: string, endpoint = 'https://api.groq.com/openai/v1/chat/completions', model = 'llama-3.3-70b-versatile') {
+        this.apiKey = apiKey;
+        this.endpoint = endpoint;
+        this.model = model;
+    }
+
+    async getResponse(messages: ChatMessage[]): Promise<string> {
+        const response = await fetch(this.endpoint, {
+            method: 'POST',
+            headers: {
+                'Content-Type': 'application/json',
+                Authorization: `Bearer ${this.apiKey}`
+            },
+            body: JSON.stringify({
+                model: this.model,
+                messages,
+                temperature: 0.7
+            })
+        });
+
+        if (!response.ok) {
+            const errorBody = await response.text();
+            throw new Error(`LLM API error: ${response.status} ${response.statusText} - ${errorBody}`);
+        }
+
+        const data = (await response.json()) as {
+            choices: Array<{ message: { content: string } }>;
+        };
+
+        return data.choices[0]?.message?.content || 'No response from LLM';
+    }
+}
+
+export async function main(): Promise<void> {
+    try {
+        // Load configuration
+        const configPath = process.argv[2] || './servers_config.json';
+        console.log(`Loading configuration from ${configPath}...`);
+        const config = await loadConfig(configPath);
+
+        // Get API key from environment variable
+        const apiKey = process.env.LLM_API_KEY;
+        if (!apiKey) {
+            throw new Error('LLM_API_KEY environment variable is required');
+        }
+
+        // Connect to all MCP servers
+        console.log('Connecting to MCP servers...');
+        const clients = await connectToAllServers(config);
+        console.log(`Connected to ${clients.size} server(s): ${[...clients.keys()].join(', ')}\n`);
+
+        // Initialize LLM client (defaults to Groq, can be configured)
+        const llmClient = new SimpleLLMClient(apiKey);
+
+        // Start chat session
+        const session = new ChatSession(clients, llmClient);
+        await session.start();
+    } catch (error) {
+        console.error('Failed to start chatbot:', error);
+        process.exit(1);
+    }
+}
+
+// Run if executed directly
+if (import.meta.url === `file://${process.argv[1]}`) {
+    main().catch(console.error);
+}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 92dbf8253..49e08ec25 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -293,6 +293,9 @@ importers:
       tsdown:
         specifier: catalog:devTools
         version: 0.18.0(@typescript/native-preview@7.0.0-dev.20251218.3)(typescript@5.9.3)
+      vitest:
+        specifier: catalog:devTools
+        version: 4.0.9(@types/node@24.10.3)(tsx@4.20.6)
 
   examples/server:
     dependencies:
diff --git a/test/integration/test/client/simpleChatbot.test.ts b/test/integration/test/client/simpleChatbot.test.ts
new file mode 100644
index 000000000..43d0e88ef
--- /dev/null
+++ b/test/integration/test/client/simpleChatbot.test.ts
@@ -0,0 +1,256 @@
+import { dirname, join } from 'node:path';
+import type { Interface as ReadlineInterface } from 'node:readline/promises';
+import { fileURLToPath } from 'node:url';
+
+import { Client, type Client as ClientType } from '@modelcontextprotocol/client';
+import { InMemoryTransport } from '@modelcontextprotocol/core';
+import { McpServer } from '@modelcontextprotocol/server';
+import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi } from 'vitest';
+import { z } from 'zod';
+
+import type { LLMClient } from '../../../../examples/client/src/simpleChatbot.js';
+import { ChatSession, loadConfig } from '../../../../examples/client/src/simpleChatbot.js';
+
+// Get the directory of this test file
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+const cleanup = (clients: ClientType[]) => {
+    return Promise.all(
+        clients.map(async client => {
+            try {
+                await client.transport?.close();
+            } catch {
+                console.warn('Error closing client transport');
+            }
+        })
+    );
+};
+
+/**
+ * Integration tests for simpleChatbot functions and ChatSession class
+ */
+describe('simpleChatbot', () => {
+    let testServer: McpServer;
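+    // testServer runs in-process and is connected to clients over InMemoryTransport (see the
+    // ChatSession beforeEach below), so the suite exercises the real protocol without spawning
+    // STDIO child processes.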
+
+    beforeAll(async () => {
+        // Create a lightweight in-process test server
+        testServer = new McpServer({
+            name: 'test-server',
+            version: '1.0.0'
+        });
+
+        // Register an echo tool for testing using the new API
+        testServer.registerTool(
+            'echo',
+            {
+                description: 'Echoes back the message',
+                inputSchema: {
+                    message: z.string().describe('Message to echo')
+                }
+            },
+            async ({ message }) => ({
+                content: [
+                    {
+                        type: 'text',
+                        text: `Echo: ${message}`
+                    }
+                ]
+            })
+        );
+    });
+
+    afterAll(async () => {
+        await testServer.close();
+    });
+
+    beforeEach(() => {
+        vi.clearAllMocks();
+    });
+
+    describe('loadConfig', () => {
+        it('should load configuration from a JSON file', async () => {
+            const configPath = join(__dirname, 'test-servers-config.json');
+            const config = await loadConfig(configPath);
+            expect(config).toHaveProperty('mcpServers');
+        });
+    });
+
+    describe('ChatSession', () => {
+        let mockLlmClient: LLMClient;
+        let mcpClients: Map<string, Client>;
+        let client: Client;
+
+        beforeEach(async () => {
+            mockLlmClient = {
+                getResponse: vi.fn().mockResolvedValue('Mock response')
+            };
+
+            // Connect to the in-process test server using InMemoryTransport
+            const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair();
+
+            client = new Client(
+                {
+                    name: 'test-client',
+                    version: '1.0.0'
+                },
+                {
+                    capabilities: {}
+                }
+            );
+
+            await Promise.all([client.connect(clientTransport), testServer.connect(serverTransport)]);
+
+            mcpClients = new Map([['test', client]]);
+        });
+
+        afterEach(async () => {
+            // Clean up all connections
+            if (mcpClients) {
+                await cleanup(Array.from(mcpClients.values()));
+            }
+        });
+
+        describe('constructor', () => {
+            it('should construct with provided clients and llm client', () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+                expect(session).toBeDefined();
+                expect(session.clients).toBe(mcpClients);
+                expect(session.llmClient).toBe(mockLlmClient);
+            });
+        });
+
+        describe('getAvailableTools', () => {
+            it('should aggregate tools from all servers with server names', async () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+                const availableTools = await session.getAvailableTools();
+                expect(availableTools.length).toBeGreaterThan(0); // test server provides echo tool
+                const toolNames = availableTools.map(tool => tool.name);
+                // Verify we get some tools
+                expect(toolNames.length).toBeGreaterThan(0);
+                expect(toolNames).toContain('echo');
+            });
+        });
+
+        describe('processLlmResponse', () => {
+            it('should detect if LLM wants to call a tool, and execute it', async () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+
+                // Get an actual tool from the connected servers
+                const availableTools = await session.getAvailableTools();
+                expect(availableTools.length).toBeGreaterThan(0);
+
+                // Use echo tool from test server
+                const echoTool = availableTools.find(t => t.name === 'echo');
+                expect(echoTool).toBeDefined();
+
+                // Simulate processing llm response that requests a tool call with proper arguments
+                const toolCallResponse = JSON.stringify({ tool: 'echo', arguments: { message: 'test message' } });
+                const result = await session.processLlmResponse(toolCallResponse);
+                expect(result).toContain('Tool execution result');
+                expect(result).toContain('Echo: test message');
+            });
+            it('should return response if no tool invocation is needed', async () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+                const llmResponse = 'This is a simple response.';
+                const result = await session.processLlmResponse(llmResponse);
+                expect(result).toBe(llmResponse);
+            });
+        });
+
+        describe('cleanup', () => {
+            it('should close all server connections', async () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+
+                // Create spies on all transports
+                const closeSpies = Array.from(mcpClients.values()).map(client => vi.spyOn(client.transport!, 'close'));
+
+                // Verify none have been called yet
+                closeSpies.forEach(spy => expect(spy).not.toHaveBeenCalled());
+
+                // Cleanup - may throw connection closed error which is expected
+                await session.cleanup().catch(() => {
+                    // Expected: transports may error on close
+                });
+
+                // Verify all transports were closed at least once
+                closeSpies.forEach(spy => expect(spy).toHaveBeenCalled());
+            });
+        });
+
+        describe('getMessages', () => {
+            it('should return empty array initially', () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+                const messages = session.getMessages();
+                expect(messages).toEqual([]);
+                expect(messages.length).toBe(0);
+            });
+
+            it('should return copy of messages', () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+                session.messages.push({ role: 'user', content: 'test' });
+
+                const messages = session.getMessages();
+                expect(messages).toEqual([{ role: 'user', content: 'test' }]);
+
+                // Verify it's a copy by modifying and checking original
+                messages.push({ role: 'assistant', content: 'response' });
+                expect(session.messages.length).toBe(1);
+                expect(messages.length).toBe(2);
+            });
+        });
+
+        describe('start', () => {
+            it('should handle interactive chat session with user input', async () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+
+                // Mock readline interface (Promise-based from readline/promises)
+                const mockRl = {
+                    question: vi.fn(),
+                    close: vi.fn()
+                };
+
+                // Simulate user inputs: one message then exit
+                mockRl.question.mockResolvedValueOnce('Hello, assistant!').mockResolvedValueOnce('exit');
+
+                await session.start(mockRl as unknown as ReadlineInterface);
+
+                // Verify messages were added
+                const messages = session.getMessages();
+                expect(messages.length).toBeGreaterThanOrEqual(3); // system + user + assistant
+                expect(messages.some(m => m.role === 'user' && m.content === 'Hello, assistant!')).toBe(true);
+                expect(messages.some(m => m.role === 'assistant')).toBe(true);
+                expect(mockLlmClient.getResponse).toHaveBeenCalled();
+            });
+
+            it('should handle tool call during chat session', async () => {
+                const session = new ChatSession(mcpClients, mockLlmClient);
+
+                // Get an actual tool from the connected servers
+                const availableTools = await session.getAvailableTools();
+                const echoTool = availableTools.find(t => t.name === 'echo');
+                expect(echoTool).toBeDefined();
+
+                // Mock LLM to return tool call request with proper arguments
+                vi.mocked(mockLlmClient.getResponse).mockResolvedValueOnce(
+                    JSON.stringify({ tool: 'echo', arguments: { message: 'test' } })
+                );
+
+                const mockRl = {
+                    question: vi.fn(),
+                    close: vi.fn()
+                };
+
+                mockRl.question.mockResolvedValueOnce('Use a tool').mockResolvedValueOnce('exit');
+
+                await session.start(mockRl as unknown as ReadlineInterface);
+
+                const messages = session.getMessages();
+                // Tool result should be in a system message after the assistant's tool call
+                const toolResponse = messages.find(m => m.role === 'system' && m.content.includes('Tool execution result'));
+                expect(toolResponse).toBeDefined();
+                expect(toolResponse?.content).toContain('Echo: test');
+            });
+        });
+    });
+});
diff --git a/test/integration/test/client/test-servers-config.json b/test/integration/test/client/test-servers-config.json
new file mode 100644
index 000000000..b538bf665
--- /dev/null
+++ b/test/integration/test/client/test-servers-config.json
@@ -0,0 +1,8 @@
+{
+    "mcpServers": {
+        "everything": {
+            "command": "npx",
+            "args": ["-y", "@modelcontextprotocol/server-everything"]
+        }
+    }
+}
diff --git a/test/integration/test/taskResumability.test.ts b/test/integration/test/taskResumability.test.ts
index 1e4d8a0fd..178a95202 100644
--- a/test/integration/test/taskResumability.test.ts
+++ b/test/integration/test/taskResumability.test.ts
@@ -2,13 +2,13 @@ import { randomUUID } from 'node:crypto';
 import { createServer, type Server } from 'node:http';
 
 import { Client, StreamableHTTPClientTransport } from '@modelcontextprotocol/client';
+import type { EventStore, JSONRPCMessage } from '@modelcontextprotocol/server';
 import { CallToolResultSchema, LoggingMessageNotificationSchema, McpServer, StreamableHTTPServerTransport } from '@modelcontextprotocol/server';
-import type { EventStore, JSONRPCMessage } from '@modelcontextprotocol/server';
 import type { ZodMatrixEntry } from '@modelcontextprotocol/test-helpers';
 import { listenOnRandomPort, zodTestMatrix } from '@modelcontextprotocol/test-helpers';
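Not part of the diff above — a minimal usage sketch of the chatbot's exported API, assuming only the exports added in `examples/client/src/simpleChatbot.ts`; the OpenAI endpoint and model strings are placeholders for any OpenAI-compatible provider:

```typescript
// usageSketch.ts (hypothetical file) — wires the exports together without the interactive main().
import { ChatSession, SimpleLLMClient, connectToAllServers, loadConfig } from './simpleChatbot.js';

async function run(): Promise<void> {
    // Read servers_config.json and open one STDIO client per configured server.
    const config = await loadConfig('./servers_config.json');
    const clients = await connectToAllServers(config);

    // Second/third constructor arguments override SimpleLLMClient's Groq endpoint/model defaults.
    const llm = new SimpleLLMClient(process.env.LLM_API_KEY!, 'https://api.openai.com/v1/chat/completions', 'gpt-4o-mini');

    // start() runs the interactive loop and closes the server transports before returning.
    await new ChatSession(clients, llm).start();
}

run().catch(console.error);
```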