1 change: 1 addition & 0 deletions .gitignore
@@ -165,6 +165,7 @@ extensions/intellij/bin
extensions/.continue-debug/

*.vsix
local-artifacts/

# intellij module library files
*.iml
208 changes: 179 additions & 29 deletions core/config/onboarding.ts
@@ -1,12 +1,153 @@
import { ConfigYaml } from "@continuedev/config-yaml";

export const LOCAL_ONBOARDING_PROVIDER_TITLE = "Ollama";
export const LOCAL_ONBOARDING_FIM_MODEL = "qwen2.5-coder:1.5b-base";
export const LOCAL_ONBOARDING_FIM_TITLE = "Qwen2.5-Coder 1.5B";
export const LOCAL_ONBOARDING_CHAT_MODEL = "llama3.1:8b";
export const LOCAL_ONBOARDING_CHAT_TITLE = "Llama 3.1 8B";
export const LOCAL_ONBOARDING_EMBEDDINGS_MODEL = "nomic-embed-text:latest";
export const LOCAL_ONBOARDING_EMBEDDINGS_TITLE = "Nomic Embed";
export type LocalOnboardingProvider = "ollama" | "lmstudio";

export const DEFAULT_LOCAL_ONBOARDING_PROVIDER: LocalOnboardingProvider =
"ollama";

type LocalOnboardingModel = {
name: string;
provider: LocalOnboardingProvider;
model: string;
roles: (
| "chat"
| "autocomplete"
| "embed"
| "edit"
| "apply"
| "summarize"
| "subagent"
| "rerank"
)[];
};

type LocalOnboardingConfig = {
providerTitle: string;
chatTitle: string;
chatModel: string;
autocompleteTitle?: string;
autocompleteModel?: string;
embeddingsTitle?: string;
embeddingsModel?: string;
};

const OLLAMA_LOCAL_ONBOARDING_CONFIG: LocalOnboardingConfig = {
providerTitle: "Ollama",
chatTitle: "Llama 3.1 8B",
chatModel: "llama3.1:8b",
autocompleteTitle: "Qwen2.5-Coder 1.5B",
autocompleteModel: "qwen2.5-coder:1.5b-base",
embeddingsTitle: "Nomic Embed",
embeddingsModel: "nomic-embed-text:latest",
};

const LMSTUDIO_LOCAL_ONBOARDING_CONFIG: LocalOnboardingConfig = {
providerTitle: "LM Studio",
chatTitle: "LM Studio",
chatModel: "AUTODETECT",
};

export function getLocalOnboardingConfig(
provider: LocalOnboardingProvider = DEFAULT_LOCAL_ONBOARDING_PROVIDER,
): LocalOnboardingConfig {
return provider === "lmstudio"
? LMSTUDIO_LOCAL_ONBOARDING_CONFIG
: OLLAMA_LOCAL_ONBOARDING_CONFIG;
}

function dedupeModels(models?: string[]) {
if (!models?.length) {
return [];
}

return Array.from(
new Set(models.map((model) => model.trim()).filter(Boolean)),
);
}

function getPreferredModel(
models: string[],
matchers: RegExp[],
fallback: string,
): string {
return (
models.find((model) => matchers.some((matcher) => matcher.test(model))) ??
fallback
);
}

function getLmStudioOnboardingModels(
models?: string[],
): LocalOnboardingModel[] {
const availableModels = dedupeModels(models);
const fallbackConfig = getLocalOnboardingConfig("lmstudio");
const fallbackModel = availableModels[0] ?? fallbackConfig.chatModel;
const chatModel =
availableModels.find(
(model) =>
[/instruct/i, /chat/i, /assistant/i].some((matcher) =>
matcher.test(model),
) && !/(coder|code|codestral)/i.test(model),
) ??
getPreferredModel(
availableModels,
[/instruct/i, /chat/i, /assistant/i],
fallbackModel,
);
const autocompleteModel = getPreferredModel(
availableModels,
[/coder/i, /code/i, /codestral/i, /deepseek/i, /qwen/i],
chatModel,
);
const embeddingsModel = getPreferredModel(
availableModels,
[/embed/i, /embedding/i, /nomic/i, /bge/i, /\be5\b/i],
"",
);

const localModels: LocalOnboardingModel[] = [
{
name: chatModel,
provider: "lmstudio",
model: chatModel,
roles:
autocompleteModel === chatModel
? ["chat", "edit", "apply", "autocomplete"]
: ["chat", "edit", "apply"],
},
];

if (autocompleteModel !== chatModel) {
localModels.push({
name: autocompleteModel,
provider: "lmstudio",
model: autocompleteModel,
roles: ["autocomplete"],
});
}

if (embeddingsModel && embeddingsModel !== chatModel) {
localModels.push({
name: embeddingsModel,
provider: "lmstudio",
model: embeddingsModel,
roles: ["embed"],
});
}

return localModels;
}

export function getLocalOnboardingPrimaryModelTitle(
provider: LocalOnboardingProvider = DEFAULT_LOCAL_ONBOARDING_PROVIDER,
availableModels?: string[],
) {
if (provider === "lmstudio") {
return getLmStudioOnboardingModels(availableModels)[0]?.name ?? "LM Studio";
}

return getLocalOnboardingConfig(provider).chatTitle;
}

const ANTHROPIC_MODEL_CONFIG = {
slugs: ["anthropic/claude-3-7-sonnet", "anthropic/claude-4-sonnet"],
@@ -34,30 +175,39 @@ export function setupBestConfig(config: ConfigYaml): ConfigYaml {
};
}

export function setupLocalConfig(config: ConfigYaml): ConfigYaml {
export function setupLocalConfig(
config: ConfigYaml,
provider: LocalOnboardingProvider = DEFAULT_LOCAL_ONBOARDING_PROVIDER,
availableModels?: string[],
): ConfigYaml {
const onboardingConfig = getLocalOnboardingConfig(provider);
const localModels: LocalOnboardingModel[] =
provider === "lmstudio"
? getLmStudioOnboardingModels(availableModels)
: [
{
name: onboardingConfig.chatTitle,
provider: "ollama",
model: onboardingConfig.chatModel,
roles: ["chat", "edit", "apply"],
},
{
name: onboardingConfig.autocompleteTitle!,
provider: "ollama",
model: onboardingConfig.autocompleteModel!,
roles: ["autocomplete"],
},
{
name: onboardingConfig.embeddingsTitle!,
provider: "ollama",
model: onboardingConfig.embeddingsModel!,
roles: ["embed"],
},
];

return {
...config,
models: [
{
name: LOCAL_ONBOARDING_CHAT_TITLE,
provider: "ollama",
model: LOCAL_ONBOARDING_CHAT_MODEL,
roles: ["chat", "edit", "apply"],
},
{
name: LOCAL_ONBOARDING_FIM_TITLE,
provider: "ollama",
model: LOCAL_ONBOARDING_FIM_MODEL,
roles: ["autocomplete"],
},
{
name: LOCAL_ONBOARDING_EMBEDDINGS_TITLE,
provider: "ollama",
model: LOCAL_ONBOARDING_EMBEDDINGS_MODEL,
roles: ["embed"],
},
...(config.models ?? []),
],
models: [...localModels, ...(config.models ?? [])],
};
}

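As a rough illustration of the selection heuristic above (the model titles here are made up, and only getLocalOnboardingPrimaryModelTitle is exported, so the sketch goes through it rather than the private helpers):

import { getLocalOnboardingPrimaryModelTitle } from "./onboarding";

// The chat heuristic prefers instruct/chat/assistant titles but skips
// coder-flavoured ones, so the Llama build wins even though the Qwen
// coder model also matches /instruct/i.
getLocalOnboardingPrimaryModelTitle("lmstudio", [
  "qwen2.5-coder-7b-instruct",
  "llama-3.1-8b-instruct",
]); // => "llama-3.1-8b-instruct"

// With no detected models, the LM Studio path falls back to AUTODETECT.
getLocalOnboardingPrimaryModelTitle("lmstudio"); // => "AUTODETECT"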
87 changes: 87 additions & 0 deletions core/config/onboarding.vitest.ts
@@ -0,0 +1,87 @@
import { describe, expect, it } from "vitest";

import {
getLocalOnboardingPrimaryModelTitle,
setupLocalConfig,
} from "./onboarding";

describe("setupLocalConfig", () => {
it("adds the default Ollama onboarding models", () => {
const result = setupLocalConfig({
name: "Local Config",
version: "0.0.1",
schema: "v1",
models: [],
});

expect(result.models?.slice(0, 3)).toEqual([
{
name: "Llama 3.1 8B",
provider: "ollama",
model: "llama3.1:8b",
roles: ["chat", "edit", "apply"],
},
{
name: "Qwen2.5-Coder 1.5B",
provider: "ollama",
model: "qwen2.5-coder:1.5b-base",
roles: ["autocomplete"],
},
{
name: "Nomic Embed",
provider: "ollama",
model: "nomic-embed-text:latest",
roles: ["embed"],
},
]);
});

it("builds an LM Studio config from detected local models", () => {
const result = setupLocalConfig(
{
name: "Local Config",
version: "0.0.1",
schema: "v1",
models: [],
},
"lmstudio",
[
"text-embedding-nomic-embed-text-v1.5",
"Qwen2.5-Coder-7B-Instruct-GGUF",
"Meta-Llama-3.1-8B-Instruct-GGUF",
],
);

expect(result.models).toEqual([
{
name: "Meta-Llama-3.1-8B-Instruct-GGUF",
provider: "lmstudio",
model: "Meta-Llama-3.1-8B-Instruct-GGUF",
roles: ["chat", "edit", "apply"],
},
{
name: "Qwen2.5-Coder-7B-Instruct-GGUF",
provider: "lmstudio",
model: "Qwen2.5-Coder-7B-Instruct-GGUF",
roles: ["autocomplete"],
},
{
name: "text-embedding-nomic-embed-text-v1.5",
provider: "lmstudio",
model: "text-embedding-nomic-embed-text-v1.5",
roles: ["embed"],
},
]);
});
});

describe("getLocalOnboardingPrimaryModelTitle", () => {
it("returns the chosen LM Studio chat model title", () => {
expect(
getLocalOnboardingPrimaryModelTitle("lmstudio", [
"Qwen2.5-Coder-7B-Instruct-GGUF",
"Meta-Llama-3.1-8B-Instruct-GGUF",
]),
).toBe("Meta-Llama-3.1-8B-Instruct-GGUF");
});
});
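A minimal vitest-style sketch of the fallback path, assuming no LM Studio models are detected; following the logic in onboarding.ts, everything should collapse onto a single AUTODETECT entry that also carries the autocomplete role:

import { describe, expect, it } from "vitest";

import { setupLocalConfig } from "./onboarding";

describe("setupLocalConfig (LM Studio fallback)", () => {
  it("falls back to AUTODETECT when no models are detected", () => {
    const result = setupLocalConfig(
      { name: "Local Config", version: "0.0.1", schema: "v1", models: [] },
      "lmstudio",
      [],
    );

    // Chat and autocomplete share the AUTODETECT placeholder, and no
    // embeddings entry is added because no embedding model was found.
    expect(result.models).toEqual([
      {
        name: "AUTODETECT",
        provider: "lmstudio",
        model: "AUTODETECT",
        roles: ["chat", "edit", "apply", "autocomplete"],
      },
    ]);
  });
});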
15 changes: 12 additions & 3 deletions core/core.ts
@@ -17,6 +17,7 @@ import { CodebaseIndexer } from "./indexing/CodebaseIndexer";
import DocsService from "./indexing/docs/DocsService";
import { countTokens } from "./llm/countTokens";
import Lemonade from "./llm/llms/Lemonade";
import LMStudio from "./llm/llms/LMStudio";
import Ollama from "./llm/llms/Ollama";
import { EditAggregator } from "./nextEdit/context/aggregateEdits";
import { createNewPromptFileV2 } from "./promptFiles/createNewPromptFile";
@@ -1395,6 +1396,9 @@ export class Core {
if (msg.data.title === "Ollama") {
const models = await new Ollama({ model: "" }).listModels();
return models;
} else if (msg.data.title === "LM Studio") {
const models = await new LMStudio({ model: "" }).listModels();
return models;
} else if (msg.data.title === "Lemonade") {
const models = await new Lemonade({ model: "" }).listModels();
return models;
@@ -1403,21 +1407,26 @@
}
}
} catch (e) {
console.debug(`Error listing Ollama models: ${e}`);
console.debug(`Error listing models for ${msg.data.title}: ${e}`);
return undefined;
}
}

private async handleCompleteOnboarding(
msg: Message<CompleteOnboardingPayload>,
) {
const { mode, provider, apiKey } = msg.data;
const { mode, provider, apiKey, localModelTitles } = msg.data;

let editConfigYamlCallback: (config: ConfigYaml) => ConfigYaml;

switch (mode) {
case OnboardingModes.LOCAL:
editConfigYamlCallback = setupLocalConfig;
editConfigYamlCallback = (config: ConfigYaml) =>
setupLocalConfig(
config,
provider === "lmstudio" ? "lmstudio" : "ollama",
localModelTitles,
);
break;

case OnboardingModes.API_KEY:
1 change: 1 addition & 0 deletions core/index.d.ts
@@ -1948,6 +1948,7 @@ export interface CompleteOnboardingPayload {
mode: OnboardingModes;
provider?: string;
apiKey?: string;
localModelTitles?: string[];
}

export interface CompiledMessagesResult {
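For context, a sketch of what such a payload might look like when the GUI completes local onboarding with LM Studio; the titles are illustrative and the GUI-side plumbing is outside this diff:

// Types as declared in core/index.d.ts; values here are only an example.
const payload: CompleteOnboardingPayload = {
  mode: OnboardingModes.LOCAL,
  provider: "lmstudio",
  localModelTitles: [
    "Meta-Llama-3.1-8B-Instruct-GGUF",
    "Qwen2.5-Coder-7B-Instruct-GGUF",
  ],
};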