Commit d5c3a96

feat: add Ollama configuration options

1 parent 597211b

7 files changed: +80 -34 lines

packages/agent/src/core/llm/providers/ollama.ts

Lines changed: 36 additions & 16 deletions

@@ -29,7 +29,10 @@ export class OllamaProvider implements LLMProvider {
 
   constructor(model: string, options: OllamaOptions = {}) {
     this.model = model;
-    this.baseUrl = options.baseUrl || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
+    this.baseUrl =
+      options.baseUrl ||
+      process.env.OLLAMA_BASE_URL ||
+      'http://localhost:11434';
 
     // Ensure baseUrl doesn't end with a slash
     if (this.baseUrl.endsWith('/')) {
@@ -41,7 +44,15 @@ export class OllamaProvider implements LLMProvider {
    * Generate text using Ollama API
    */
   async generateText(options: GenerateOptions): Promise<LLMResponse> {
-    const { messages, functions, temperature = 0.7, maxTokens, topP, frequencyPenalty, presencePenalty } = options;
+    const {
+      messages,
+      functions,
+      temperature = 0.7,
+      maxTokens,
+      topP,
+      frequencyPenalty,
+      presencePenalty,
+    } = options;
 
     // Format messages for Ollama API
     const formattedMessages = this.formatMessages(messages);
@@ -56,8 +67,12 @@ export class OllamaProvider implements LLMProvider {
         temperature: temperature,
         // Ollama uses top_k instead of top_p, but we'll include top_p if provided
         ...(topP !== undefined && { top_p: topP }),
-        ...(frequencyPenalty !== undefined && { frequency_penalty: frequencyPenalty }),
-        ...(presencePenalty !== undefined && { presence_penalty: presencePenalty }),
+        ...(frequencyPenalty !== undefined && {
+          frequency_penalty: frequencyPenalty,
+        }),
+        ...(presencePenalty !== undefined && {
+          presence_penalty: presencePenalty,
+        }),
       },
     };
 
@@ -93,11 +108,14 @@ export class OllamaProvider implements LLMProvider {
 
       // Extract content and tool calls
       const content = data.message?.content || '';
-      const toolCalls = data.message?.tool_calls?.map((toolCall: any) => ({
-        id: toolCall.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
-        name: toolCall.name,
-        content: JSON.stringify(toolCall.args || toolCall.arguments || {}),
-      })) || [];
+      const toolCalls =
+        data.message?.tool_calls?.map((toolCall: any) => ({
+          id:
+            toolCall.id ||
+            `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
+          name: toolCall.name,
+          content: JSON.stringify(toolCall.args || toolCall.arguments || {}),
+        })) || [];
 
       // Create token usage from response data
       const tokenUsage = new TokenUsage();
@@ -110,9 +128,7 @@ export class OllamaProvider implements LLMProvider {
         tokenUsage: tokenUsage,
       };
     } catch (error) {
-      throw new Error(
-        `Error calling Ollama API: ${(error as Error).message}`,
-      );
+      throw new Error(`Error calling Ollama API: ${(error as Error).message}`);
     }
   }
 
@@ -121,7 +137,11 @@ export class OllamaProvider implements LLMProvider {
    */
   private formatMessages(messages: Message[]): any[] {
     return messages.map((msg) => {
-      if (msg.role === 'user' || msg.role === 'assistant' || msg.role === 'system') {
+      if (
+        msg.role === 'user' ||
+        msg.role === 'assistant' ||
+        msg.role === 'system'
+      ) {
         return {
           role: msg.role,
           content: msg.content,
@@ -147,11 +167,11 @@ export class OllamaProvider implements LLMProvider {
         ],
       };
     }
-      // Default fallback
+      // Default fallback for unknown message types
       return {
         role: 'user',
-        content: msg.content,
+        content: (msg as any).content || '',
       };
     });
   }
-}
+}
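Usage sketch (not part of the commit): how the constructor's new base-URL fallback resolves, assuming the relative import path below; the host names are hypothetical.

import { OllamaProvider } from './ollama';

// 1. An explicit option wins:
const fromOption = new OllamaProvider('llama3', {
  baseUrl: 'http://gpu-box:11434',
});

// 2. Otherwise the OLLAMA_BASE_URL environment variable is used:
process.env.OLLAMA_BASE_URL = 'http://ollama.internal:11434';
const fromEnv = new OllamaProvider('llama3');

// 3. Otherwise the default applies (a trailing slash is stripped in every case):
delete process.env.OLLAMA_BASE_URL;
const fromDefault = new OllamaProvider('llama3'); // http://localhost:11434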

packages/agent/src/core/toolAgent/config.test.ts

Lines changed: 15 additions & 8 deletions

@@ -9,20 +9,27 @@ describe('getModel', () => {
     expect(model.provider).toBe('anthropic.messages');
   });
 
-  /*
-
-  it('should return the correct model for openai', () => {
-    const model = getModel('openai', 'gpt-4o-2024-05-13');
+  it('should return the correct model for ollama', () => {
+    const model = getModel('ollama', 'llama3');
     expect(model).toBeDefined();
-    expect(model.provider).toBe('openai.chat');
+    expect(model.provider).toBe('ollama.chat');
   });
 
-  it('should return the correct model for ollama', () => {
-    const model = getModel('ollama', 'llama3');
+  it('should return the correct model for ollama with custom base URL', () => {
+    const model = getModel('ollama', 'llama3', {
+      ollamaBaseUrl: 'http://custom-ollama:11434',
+    });
     expect(model).toBeDefined();
     expect(model.provider).toBe('ollama.chat');
   });
 
+  /*
+  it('should return the correct model for openai', () => {
+    const model = getModel('openai', 'gpt-4o-2024-05-13');
+    expect(model).toBeDefined();
+    expect(model.provider).toBe('openai.chat');
+  });
+
   it('should return the correct model for xai', () => {
     const model = getModel('xai', 'grok-1');
     expect(model).toBeDefined();
@@ -34,7 +41,7 @@ describe('getModel', () => {
     expect(model).toBeDefined();
     expect(model.provider).toBe('mistral.chat');
   });
-*/
+  */
 
   it('should throw an error for unknown provider', () => {
     expect(() => {

packages/agent/src/core/toolAgent/config.ts

Lines changed: 8 additions & 5 deletions

@@ -8,29 +8,32 @@ import { ToolContext } from '../types';
 /**
  * Available model providers
  */
-export type ModelProvider = 'anthropic';
+export type ModelProvider = 'anthropic' | 'ollama';
 /*
   | 'openai'
-  | 'ollama'
   | 'xai'
   | 'mistral'*/
 
 /**
  * Get the model instance based on provider and model name
  */
-export function getModel(provider: ModelProvider, model: string): LLMProvider {
+export function getModel(
+  provider: ModelProvider,
+  model: string,
+  options?: { ollamaBaseUrl?: string },
+): LLMProvider {
   switch (provider) {
     case 'anthropic':
       return createProvider('anthropic', model);
-    /*case 'openai':
-      return createProvider('openai', model);
     case 'ollama':
       if (options?.ollamaBaseUrl) {
         return createProvider('ollama', model, {
           baseUrl: options.ollamaBaseUrl,
         });
       }
       return createProvider('ollama', model);
+    /*case 'openai':
+      return createProvider('openai', model);
     case 'xai':
       return createProvider('xai', model);
     case 'mistral':
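A minimal sketch of the updated getModel in use, mirroring the tests above (import path assumed, custom host hypothetical):

import { getModel } from './config';

// Default local Ollama endpoint:
const local = getModel('ollama', 'llama3');

// Custom endpoint via the new optional third parameter:
const remote = getModel('ollama', 'llama3', {
  ollamaBaseUrl: 'http://custom-ollama:11434',
});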

packages/agent/src/utils/errors.ts

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@ export const providerConfig: Record<
     docsUrl: 'https://mycoder.ai/docs/getting-started/mistral',
   },*/
   // No API key needed for ollama as it uses a local server
-  //ollama: undefined,
+  ollama: undefined,
 };
 
 /**

packages/cli/src/commands/$default.ts

Lines changed: 12 additions & 3 deletions

@@ -136,8 +136,15 @@ export const command: CommandModule<SharedOptions, DefaultArgs> = {
         process.env[keyName] = configApiKey;
         logger.debug(`Using ${keyName} from configuration`);
       }
+    } else if (userModelProvider === 'ollama') {
+      // For Ollama, we check if the base URL is set
+      const ollamaBaseUrl = argv.ollamaBaseUrl || userConfig.ollamaBaseUrl;
+      logger.debug(`Using Ollama with base URL: ${ollamaBaseUrl}`);
+    } else {
+      // Unknown provider
+      logger.error(`Unknown provider: ${userModelProvider}`);
+      throw new Error(`Unknown provider: ${userModelProvider}`);
     }
-    // No API key check needed for Ollama as it uses a local server
 
     let prompt: string | undefined;
 
@@ -193,12 +200,14 @@ export const command: CommandModule<SharedOptions, DefaultArgs> = {
     const agentConfig = {
       ...DEFAULT_CONFIG,
       model: getModel(
-        userModelProvider as 'anthropic' /*
+        userModelProvider as 'anthropic' | 'ollama' /*
           | 'openai'
-          | 'ollama'
          | 'xai'
          | 'mistral'*/,
         userModelName,
+        {
+          ollamaBaseUrl: argv.ollamaBaseUrl || userConfig.ollamaBaseUrl,
+        },
       ),
       maxTokens: userMaxTokens,
       temperature: userTemperature,
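In effect, the Ollama base URL now resolves with this precedence (a sketch using the names from this file):

// 1. the --ollamaBaseUrl CLI flag, then 2. the persisted user config,
// which defaults to http://localhost:11434 (see packages/cli/src/settings/config.ts):
const effectiveOllamaBaseUrl = argv.ollamaBaseUrl || userConfig.ollamaBaseUrl;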

packages/cli/src/options.ts

Lines changed: 6 additions & 1 deletion

@@ -18,6 +18,7 @@ export type SharedOptions = {
   readonly githubMode?: boolean;
   readonly userWarning?: boolean;
   readonly upgradeCheck?: boolean;
+  readonly ollamaBaseUrl?: string;
 };
 
 export const sharedOptions = {
@@ -36,7 +37,7 @@ export const sharedOptions = {
   provider: {
     type: 'string',
     description: 'AI model provider to use',
-    choices: ['anthropic' /*, 'openai', 'ollama', 'xai', 'mistral'*/],
+    choices: ['anthropic', 'ollama' /*, 'openai', 'xai', 'mistral'*/],
   } as const,
   model: {
     type: 'string',
@@ -120,4 +121,8 @@ export const sharedOptions = {
     description: 'Disable version upgrade check (for automated/remote usage)',
     default: false,
   } as const,
+  ollamaBaseUrl: {
+    type: 'string',
+    description: 'Base URL for Ollama API (default: http://localhost:11434)',
+  } as const,
 };
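With the option registered, an invocation could look like this (the binary name mycoder is assumed):

mycoder --provider ollama --model llama3 --ollamaBaseUrl http://custom-ollama:11434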

packages/cli/src/settings/config.ts

Lines changed: 2 additions & 0 deletions

@@ -54,6 +54,8 @@ const defaultConfig = {
   customPrompt: '',
   profile: false,
   tokenCache: true,
+  // Ollama configuration
+  ollamaBaseUrl: 'http://localhost:11434',
   // API keys (empty by default)
   ANTHROPIC_API_KEY: '',
 };
