Commit 999a649

feat: merge origin/add-ollama-support with conflict resolution and simplified tests

2 parents: 929c3c8 + d5c3a96
File tree: 11 files changed (+569, -99 lines changed)

packages/agent/src/core/llm/provider.ts (2 additions, 0 deletions)

```diff
@@ -3,6 +3,7 @@
  */
 
 import { AnthropicProvider } from './providers/anthropic.js';
+import { OllamaProvider } from './providers/ollama.js';
 import { OpenAIProvider } from './providers/openai.js';
 import { ProviderOptions, GenerateOptions, LLMResponse } from './types.js';
 
@@ -41,6 +42,7 @@ const providerFactories: Record<
 > = {
   anthropic: (model, options) => new AnthropicProvider(model, options),
   openai: (model, options) => new OpenAIProvider(model, options),
+  ollama: (model, options) => new OllamaProvider(model, options),
 };
 
 /**
```
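With the factory registered, an Ollama-backed provider is constructed through the same `createProvider` entry point as the other providers. A minimal sketch (the three-argument call shape matches the `getModel` helper further down; the model name and message content are illustrative):

```ts
import { createProvider } from './provider.js';

// Obtain an Ollama provider from the shared factory registry.
// 'llama3' is an illustrative model name; baseUrl is optional and
// defaults to http://localhost:11434 inside OllamaProvider.
const provider = createProvider('ollama', 'llama3', {
  baseUrl: 'http://localhost:11434',
});

const response = await provider.generateText({
  messages: [{ role: 'user', content: 'Say hello.' }],
  temperature: 0.7,
});
console.log(response.text, response.tokenUsage);
```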
packages/agent/src/core/llm/providers/ollama.ts (new file, 177 additions, 0 deletions)

```ts
/**
 * Ollama provider implementation
 */

import { TokenUsage } from '../../tokens.js';
import { LLMProvider } from '../provider.js';
import {
  GenerateOptions,
  LLMResponse,
  Message,
  ProviderOptions,
} from '../types.js';

/**
 * Ollama-specific options
 */
export interface OllamaOptions extends ProviderOptions {
  baseUrl?: string;
}

/**
 * Ollama provider implementation
 */
export class OllamaProvider implements LLMProvider {
  name: string = 'ollama';
  provider: string = 'ollama.chat';
  model: string;
  private baseUrl: string;

  constructor(model: string, options: OllamaOptions = {}) {
    this.model = model;
    this.baseUrl =
      options.baseUrl ||
      process.env.OLLAMA_BASE_URL ||
      'http://localhost:11434';

    // Ensure baseUrl doesn't end with a slash
    if (this.baseUrl.endsWith('/')) {
      this.baseUrl = this.baseUrl.slice(0, -1);
    }
  }

  /**
   * Generate text using Ollama API
   */
  async generateText(options: GenerateOptions): Promise<LLMResponse> {
    const {
      messages,
      functions,
      temperature = 0.7,
      maxTokens,
      topP,
      frequencyPenalty,
      presencePenalty,
    } = options;

    // Format messages for Ollama API
    const formattedMessages = this.formatMessages(messages);

    try {
      // Prepare request options
      const requestOptions: any = {
        model: this.model,
        messages: formattedMessages,
        stream: false,
        options: {
          temperature: temperature,
          // Include optional sampling parameters only when provided
          ...(topP !== undefined && { top_p: topP }),
          ...(frequencyPenalty !== undefined && {
            frequency_penalty: frequencyPenalty,
          }),
          ...(presencePenalty !== undefined && {
            presence_penalty: presencePenalty,
          }),
        },
      };

      // Add max_tokens if provided
      if (maxTokens !== undefined) {
        requestOptions.options.num_predict = maxTokens;
      }

      // Add functions/tools if provided
      if (functions && functions.length > 0) {
        requestOptions.tools = functions.map((fn) => ({
          name: fn.name,
          description: fn.description,
          parameters: fn.parameters,
        }));
      }

      // Make the API request
      const response = await fetch(`${this.baseUrl}/api/chat`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify(requestOptions),
      });

      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`Ollama API error: ${response.status} ${errorText}`);
      }

      const data = await response.json();

      // Extract content and tool calls
      const content = data.message?.content || '';
      const toolCalls =
        data.message?.tool_calls?.map((toolCall: any) => ({
          id:
            toolCall.id ||
            `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
          name: toolCall.name,
          content: JSON.stringify(toolCall.args || toolCall.arguments || {}),
        })) || [];

      // Create token usage from response data
      const tokenUsage = new TokenUsage();
      tokenUsage.input = data.prompt_eval_count || 0;
      tokenUsage.output = data.eval_count || 0;

      return {
        text: content,
        toolCalls: toolCalls,
        tokenUsage: tokenUsage,
      };
    } catch (error) {
      throw new Error(`Error calling Ollama API: ${(error as Error).message}`);
    }
  }

  /**
   * Format messages for Ollama API
   */
  private formatMessages(messages: Message[]): any[] {
    return messages.map((msg) => {
      if (
        msg.role === 'user' ||
        msg.role === 'assistant' ||
        msg.role === 'system'
      ) {
        return {
          role: msg.role,
          content: msg.content,
        };
      } else if (msg.role === 'tool_result') {
        // Ollama expects tool results as a 'tool' role
        return {
          role: 'tool',
          content: msg.content,
          tool_call_id: msg.tool_use_id,
        };
      } else if (msg.role === 'tool_use') {
        // We'll convert tool_use to assistant messages with tool_calls
        return {
          role: 'assistant',
          content: '',
          tool_calls: [
            {
              id: msg.id,
              name: msg.name,
              arguments: msg.content,
            },
          ],
        };
      }
      // Default fallback for unknown message types
      return {
        role: 'user',
        content: (msg as any).content || '',
      };
    });
  }
}
```
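The provider issues a single non-streaming POST to Ollama's `/api/chat` endpoint and maps `maxTokens` to Ollama's `num_predict`. A small usage sketch, assuming a local Ollama server with the (illustrative) model already pulled:

```ts
import { OllamaProvider } from './providers/ollama.js';

// Assumes `ollama serve` is running locally and the model exists,
// e.g. after `ollama pull llama3` (illustrative model name).
const ollama = new OllamaProvider('llama3');

const { text, tokenUsage } = await ollama.generateText({
  messages: [
    { role: 'system', content: 'You are a terse assistant.' },
    { role: 'user', content: 'Name one prime number.' },
  ],
  maxTokens: 64, // forwarded as num_predict in the request options
});

console.log(text, tokenUsage.input, tokenUsage.output);
```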

packages/agent/src/core/toolAgent/config.test.ts (18 additions, 4 deletions)

```diff
@@ -2,27 +2,41 @@ import { describe, expect, it } from 'vitest';
 
 import { createProvider } from '../llm/provider.js';
 
+import { getModel } from './config.js';
+
 describe('createProvider', () => {
   it('should return the correct model for anthropic', () => {
     const model = createProvider('anthropic', 'claude-3-7-sonnet-20250219');
     expect(model).toBeDefined();
     expect(model.provider).toBe('anthropic.messages');
   });
 
-  /*
-
   it('should return the correct model for openai', () => {
     const model = createProvider('openai', 'gpt-4o-2024-05-13');
     expect(model).toBeDefined();
     expect(model.provider).toBe('openai.chat');
   });
-
   it('should return the correct model for ollama', () => {
     const model = createProvider('ollama', 'llama3');
     expect(model).toBeDefined();
     expect(model.provider).toBe('ollama.chat');
   });
 
+  it('should return the correct model for ollama with custom base URL', () => {
+    const model = getModel('ollama', 'llama3', {
+      ollamaBaseUrl: 'http://custom-ollama:11434',
+    });
+    expect(model).toBeDefined();
+    expect(model.provider).toBe('ollama.chat');
+  });
+
+  /*
+  it('should return the correct model for openai', () => {
+    const model = getModel('openai', 'gpt-4o-2024-05-13');
+    expect(model).toBeDefined();
+    expect(model.provider).toBe('openai.chat');
+  });
+
   it('should return the correct model for xai', () => {
     const model = createProvider('xai', 'grok-1');
     expect(model).toBeDefined();
@@ -34,7 +48,7 @@ describe('createProvider', () => {
     expect(model).toBeDefined();
     expect(model.provider).toBe('mistral.chat');
   });
-  */
+  */
 
   it('should throw an error for unknown provider', () => {
     expect(() => {
```

packages/agent/src/core/toolAgent/config.ts (31 additions, 3 deletions)

```diff
@@ -2,15 +2,14 @@ import * as fs from 'fs';
 import * as os from 'os';
 import * as path from 'path';
 
+import { createProvider, LLMProvider } from '../llm/provider.js';
 import { ToolContext } from '../types';
 
 /**
  * Available model providers
  */
-export type ModelProvider = 'anthropic' | 'openai';
+export type ModelProvider = 'anthropic' | 'openai' | 'ollama';
 /*
-  | 'openai'
-  | 'ollama'
   | 'xai'
   | 'mistral'*/
 
@@ -22,6 +21,35 @@ export type AgentConfig = {
   temperature: number;
   getSystemPrompt: (toolContext: ToolContext) => string;
 };
+
+/**
+ * Get the model instance based on provider and model name
+ */
+export function getModel(
+  provider: ModelProvider,
+  model: string,
+  options?: { ollamaBaseUrl?: string },
+): LLMProvider {
+  switch (provider) {
+    case 'anthropic':
+      return createProvider('anthropic', model);
+    case 'openai':
+      return createProvider('openai', model);
+    case 'ollama':
+      if (options?.ollamaBaseUrl) {
+        return createProvider('ollama', model, {
+          baseUrl: options.ollamaBaseUrl,
+        });
+      }
+      return createProvider('ollama', model);
+    /*case 'xai':
+      return createProvider('xai', model);
+    case 'mistral':
+      return createProvider('mistral', model);*/
+    default:
+      throw new Error(`Unknown model provider: ${provider}`);
+  }
+}
 /**
  * Default configuration for the tool agent
  */
```
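`getModel` is a thin routing layer over `createProvider` that threads the optional Ollama base URL through to the provider. A short sketch of both call shapes (URLs and model name are illustrative):

```ts
import { getModel } from './config.js';

// Default base URL (http://localhost:11434 inside OllamaProvider)
const local = getModel('ollama', 'llama3');

// Explicit base URL, e.g. an Ollama instance on another host
const remote = getModel('ollama', 'llama3', {
  ollamaBaseUrl: 'http://ollama.internal:11434',
});

console.log(local.provider, remote.provider); // 'ollama.chat' for both
```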

packages/agent/src/utils/errors.ts (1 addition, 1 deletion)

```diff
@@ -21,7 +21,7 @@ export const providerConfig: Record<
     docsUrl: 'https://mycoder.ai/docs/getting-started/mistral',
   },*/
   // No API key needed for ollama as it uses a local server
-  //ollama: undefined,
+  ollama: undefined,
 };
 
 /**
```
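Switching `ollama` from a commented-out line to a real `undefined` entry makes it a known key of `providerConfig`, so callers can distinguish "provider we know about but that needs no API key" from "provider we have never heard of". A hypothetical consumer sketch; the `keyName`/`docsUrl` fields come from the visible entries, but this lookup logic is an assumption, not the repository's actual code:

```ts
// Hypothetical sketch of consuming providerConfig (assumed shape).
type ProviderKeyInfo = { keyName: string; docsUrl: string } | undefined;

function checkApiKey(
  provider: string,
  config: Record<string, ProviderKeyInfo>,
): void {
  if (!(provider in config)) {
    throw new Error(`Unknown provider: ${provider}`);
  }
  const info = config[provider];
  if (!info) {
    return; // e.g. ollama: local server, no API key to validate
  }
  if (!process.env[info.keyName]) {
    throw new Error(`Missing ${info.keyName}; see ${info.docsUrl}`);
  }
}
```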

packages/cli/README.md (5 additions, 4 deletions)

````diff
@@ -90,29 +90,30 @@ Example configuration file:
 export default {
   // GitHub integration
   githubMode: true,
-
+
   // Browser settings
   headless: true,
   userSession: false,
   pageFilter: 'none', // 'simple', 'none', or 'readability'
-
+
   // Model settings
   provider: 'anthropic',
   model: 'claude-3-7-sonnet-20250219',
   maxTokens: 4096,
   temperature: 0.7,
-
+
   // Custom settings
   customPrompt: '',
   profile: false,
   tokenCache: true,
-
+
   // API keys (better to use environment variables for these)
   // ANTHROPIC_API_KEY: 'your-api-key',
 };
 ```
 
 MyCoder will search for configuration in the following places (in order of precedence):
+
 1. CLI options (e.g., `--githubMode true`)
 2. Configuration file (`mycoder.config.js`, `.mycoderrc`, etc.)
 3. Default values
````
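Given the expanded provider choices, the same configuration file can instead target a local Ollama server. An illustrative variant of the example above (the model name is an example; `ollamaBaseUrl` mirrors the CLI option added in options.ts below):

```js
export default {
  // Model settings: run against a local Ollama server
  provider: 'ollama',
  model: 'llama3', // any locally pulled model
  ollamaBaseUrl: 'http://localhost:11434', // optional; this is the default

  // No API key entry is needed for ollama
};
```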

packages/cli/src/commands/$default.ts (8 additions, 1 deletion)

```diff
@@ -137,8 +137,15 @@ export const command: CommandModule<SharedOptions, DefaultArgs> = {
         process.env[keyName] = configApiKey;
         logger.debug(`Using ${keyName} from configuration`);
       }
+    } else if (userModelProvider === 'ollama') {
+      // For Ollama, we check if the base URL is set
+      const ollamaBaseUrl = argv.ollamaBaseUrl || userConfig.ollamaBaseUrl;
+      logger.debug(`Using Ollama with base URL: ${ollamaBaseUrl}`);
+    } else {
+      // Unknown provider
+      logger.error(`Unknown provider: ${userModelProvider}`);
+      throw new Error(`Unknown provider: ${userModelProvider}`);
     }
-    // No API key check needed for Ollama as it uses a local server
 
     let prompt: string | undefined;
 
```
packages/cli/src/options.ts (6 additions, 1 deletion)

```diff
@@ -18,6 +18,7 @@ export type SharedOptions = {
   readonly githubMode?: boolean;
   readonly userWarning?: boolean;
   readonly upgradeCheck?: boolean;
+  readonly ollamaBaseUrl?: string;
 };
 
 export const sharedOptions = {
@@ -36,7 +37,7 @@ export const sharedOptions = {
   provider: {
     type: 'string',
     description: 'AI model provider to use',
-    choices: ['anthropic' /*, 'openai', 'ollama', 'xai', 'mistral'*/],
+    choices: ['anthropic', 'ollama' /*, 'openai', 'xai', 'mistral'*/],
   } as const,
   model: {
     type: 'string',
@@ -120,4 +121,8 @@ export const sharedOptions = {
     description: 'Disable version upgrade check (for automated/remote usage)',
     default: false,
   } as const,
+  ollamaBaseUrl: {
+    type: 'string',
+    description: 'Base URL for Ollama API (default: http://localhost:11434)',
+  } as const,
 };
```
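The new flag rides along with the existing shared options. A hypothetical sketch of how `sharedOptions` and the typed `ollamaBaseUrl` field might be wired into yargs (the CLI's real setup lives elsewhere in the package; this only shows the shape):

```ts
import yargs from 'yargs';
import { hideBin } from 'yargs/helpers';

import { sharedOptions, SharedOptions } from './options.js';

// e.g. mycoder --provider ollama --ollamaBaseUrl http://ollama.internal:11434
const argv = (await yargs(hideBin(process.argv))
  .options(sharedOptions)
  .parse()) as unknown as SharedOptions;

console.log(argv.provider, argv.ollamaBaseUrl);
```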
