From bcdd84a3f99e944d8a18f59db2fe32fbe0b4ebfb Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Sun, 7 Dec 2025 22:18:20 +0100 Subject: [PATCH 1/7] feat(ollama): add ollama types and meta --- .../ai-ollama/src/meta/model-meta-athene.ts | 52 ++ .../ai-ollama/src/meta/model-meta-aya.ts | 62 +++ .../src/meta/model-meta-codegemma.ts | 66 +++ .../src/meta/model-meta-codellama.ts | 94 ++++ .../src/meta/model-meta-command-r-plus.ts | 52 ++ .../src/meta/model-meta-command-r.ts | 52 ++ .../src/meta/model-meta-command-r7b.ts | 52 ++ .../src/meta/model-meta-deepseek-coder-v2.ts | 66 +++ .../src/meta/model-meta-deepseek-ocr.ts | 53 ++ .../src/meta/model-meta-deepseek-r1.ts | 122 +++++ .../src/meta/model-meta-deepseek-v3.1.ts | 67 +++ .../ai-ollama/src/meta/model-meta-devstral.ts | 52 ++ .../ai-ollama/src/meta/model-meta-dolphin3.ts | 49 ++ .../src/meta/model-meta-exaone3.5.ts | 80 +++ .../ai-ollama/src/meta/model-meta-falcon2.ts | 49 ++ .../ai-ollama/src/meta/model-meta-falcon3.ts | 94 ++++ .../src/meta/model-meta-firefunction-v2.ts | 52 ++ .../ai-ollama/src/meta/model-meta-gemma.ts | 66 +++ .../ai-ollama/src/meta/model-meta-gemma2.ts | 80 +++ .../ai-ollama/src/meta/model-meta-gemma3.ts | 108 ++++ .../src/meta/model-meta-granite3-dense.ts | 66 +++ .../src/meta/model-meta-granite3-guardian.ts | 66 +++ .../src/meta/model-meta-granite3-moe.ts | 66 +++ .../src/meta/model-meta-granite3.1-dense.ts | 66 +++ .../src/meta/model-meta-granite3.1-moe.ts | 66 +++ .../src/meta/model-meta-llama-guard3.ts | 66 +++ .../ai-ollama/src/meta/model-meta-llama2.ts | 80 +++ .../src/meta/model-meta-llama3-chatqa.ts | 66 +++ .../src/meta/model-meta-llama3-gradient.ts | 66 +++ .../ai-ollama/src/meta/model-meta-llama3.1.ts | 80 +++ .../src/meta/model-meta-llama3.2-vision.ts | 66 +++ .../ai-ollama/src/meta/model-meta-llama3.2.ts | 66 +++ .../ai-ollama/src/meta/model-meta-llama3.3.ts | 52 ++ .../ai-ollama/src/meta/model-meta-llama3.ts | 66 +++ .../ai-ollama/src/meta/model-meta-llama4.ts | 66 +++ .../src/meta/model-meta-llava-llama3.ts | 52 ++ .../src/meta/model-meta-llava-phi3.ts | 52 ++ .../ai-ollama/src/meta/model-meta-llava.ts | 80 +++ .../ai-ollama/src/meta/model-meta-marco-o1.ts | 49 ++ .../src/meta/model-meta-mistral-large.ts | 52 ++ .../src/meta/model-meta-mistral-nemo.ts | 52 ++ .../src/meta/model-meta-mistral-small.ts | 66 +++ .../ai-ollama/src/meta/model-meta-mistral.ts | 49 ++ .../ai-ollama/src/meta/model-meta-mixtral.ts | 66 +++ .../src/meta/model-meta-moondream.ts | 52 ++ .../src/meta/model-meta-nemotron-mini.ts | 52 ++ .../ai-ollama/src/meta/model-meta-nemotron.ts | 52 ++ .../ai-ollama/src/meta/model-meta-olmo2.ts | 66 +++ .../src/meta/model-meta-opencoder.ts | 66 +++ .../src/meta/model-meta-openhermes.ts | 66 +++ .../ai-ollama/src/meta/model-meta-phi3.ts | 66 +++ .../ai-ollama/src/meta/model-meta-phi4.ts | 49 ++ .../ai-ollama/src/meta/model-meta-qwen.ts | 150 ++++++ .../src/meta/model-meta-qwen2.5-coder.ts | 121 +++++ .../ai-ollama/src/meta/model-meta-qwen2.5.ts | 122 +++++ .../ai-ollama/src/meta/model-meta-qwen2.ts | 94 ++++ .../ai-ollama/src/meta/model-meta-qwen3.ts | 150 ++++++ .../ai-ollama/src/meta/model-meta-qwq.ts | 49 ++ .../ai-ollama/src/meta/model-meta-sailor2.ts | 79 +++ .../src/meta/model-meta-shieldgemma.ts | 80 +++ .../src/meta/model-meta-smalltinker.ts | 52 ++ .../ai-ollama/src/meta/model-meta-smollm.ts | 80 +++ .../src/meta/model-meta-tinyllama.ts | 52 ++ .../ai-ollama/src/meta/model-meta-tulu3.ts | 66 +++ .../ai-ollama/src/meta/models-meta.ts | 11 + .../typescript/ai-ollama/src/model-meta.ts | 265 
++++++++++ .../ai-ollama/src/ollama-adapter.ts | 481 ++++++++++++++++++ 67 files changed, 5211 insertions(+) create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-athene.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-aya.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-llava.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts create mode 100644 
packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts create mode 100644 packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts create mode 100644 packages/typescript/ai-ollama/src/meta/models-meta.ts create mode 100644 packages/typescript/ai-ollama/src/model-meta.ts create mode 100644 packages/typescript/ai-ollama/src/ollama-adapter.ts diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts new file mode 100644 index 00000000..5e10c76a --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const ATHENE_V2_LATEST = { + name: 'athene-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '47gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const ATHENE_V2_72b = { + name: 'athene-v2:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '47gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const ATHENE_MODELS = [ + ATHENE_V2_LATEST.name, + ATHENE_V2_72b.name, +] as const + +// const ATHENE_IMAGE_MODELS = [] as const + +// export const ATHENE_EMBEDDING_MODELS = [] as const + +// const ATHENE_AUDIO_MODELS = [] as const + +// const ATHENE_VIDEO_MODELS = [] as const + +// export type AtheneChatModels = (typeof ATHENE_MODELS)[number] + +// Manual type map for per-model provider options +export type AtheneChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [ATHENE_V2_LATEST.name]: ChatRequest + [ATHENE_V2_72b.name]: ChatRequest +} + +export type AtheneModelInputModalitiesByName = { + // Models with text, image, audio, 
video (no document) + [ATHENE_V2_LATEST.name]: typeof ATHENE_V2_LATEST.supports.input + [ATHENE_V2_72b.name]: typeof ATHENE_V2_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts new file mode 100644 index 00000000..38a93989 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts @@ -0,0 +1,62 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const AYA_LATEST = { + name: 'aya:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const AYA_8b = { + name: 'aya:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const AYA_35b = { + name: 'aya:35b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '20gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const + +// const AYA_IMAGE_MODELS = [] as const + +// export const AYA_EMBEDDING_MODELS = [] as const + +// const AYA_AUDIO_MODELS = [] as const + +// const AYA_VIDEO_MODELS = [] as const + +// export type AyaChatModels = (typeof AYA_MODELS)[number] + +// Manual type map for per-model provider options +export type AyaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [AYA_LATEST.name]: ChatRequest + [AYA_8b.name]: ChatRequest + [AYA_35b.name]: ChatRequest +} + +export type AyaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [AYA_LATEST.name]: typeof AYA_LATEST.supports.input + [AYA_8b.name]: typeof AYA_8b.supports.input + [AYA_35b.name]: typeof AYA_35b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts new file mode 100644 index 00000000..f424c512 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const CODEGEMMA_LATEST = { + name: 'codegemma:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const CODEGEMMA_2b = { + name: 'codegemma:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.65gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const CODEGEMMA_7b = { + name: 'codegemma:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const CODEGEMMA_MODELS = [ + CODEGEMMA_LATEST.name, + CODEGEMMA_2b.name, + CODEGEMMA_7b.name, +] as const + +// const CODEGEMMA_IMAGE_MODELS = [] as const + +// export const CODEGEMMA_EMBEDDING_MODELS = [] as const + +// const CODEGEMMA_AUDIO_MODELS = [] as const + +// const CODEGEMMA_VIDEO_MODELS = [] as const + +// export type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number] + +// Manual type map for per-model provider options +export type CodegemmaChatModelProviderOptionsByName = { + // Models with thinking and structured output support +
[CODEGEMMA_LATEST.name]: ChatRequest + [CODEGEMMA_2b.name]: ChatRequest + [CODEGEMMA_7b.name]: ChatRequest +} + +export type CodegemmaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [CODEGEMMA_LATEST.name]: typeof CODEGEMMA_LATEST.supports.input + [CODEGEMMA_2b.name]: typeof CODEGEMMA_2b.supports.input + [CODEGEMMA_7b.name]: typeof CODEGEMMA_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts new file mode 100644 index 00000000..df9a7786 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts @@ -0,0 +1,94 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const CODELLAMA_LATEST = { + name: 'codellama:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.8gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const CODELLAMA_7b = { + name: 'codellama:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '3.8gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const CODELLAMA_13b = { + name: 'codellama:13b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '7.4gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const CODELLAMA_34b = { + name: 'codellama:34b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '19gb', + context: 16_000, +} as const satisfies DefaultOllamaModelMeta + +const CODELLAMA_70b = { + name: 'codellama:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '39gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +export const CODELLAMA_MODELS = [ + CODELLAMA_LATEST.name, + CODELLAMA_7b.name, + CODELLAMA_13b.name, + CODELLAMA_34b.name, + CODELLAMA_70b.name, +] as const + +// const CODELLAMA_IMAGE_MODELS = [] as const + +// export const CODELLAMA_EMBEDDING_MODELS = [] as const + +// const CODELLAMA_AUDIO_MODELS = [] as const + +// const CODELLAMA_VIDEO_MODELS = [] as const + +// export type CodellamaChatModels = (typeof CODELLAMA_MODELS)[number] + +// Manual type map for per-model provider options +export type CodellamaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [CODELLAMA_LATEST.name]: ChatRequest + [CODELLAMA_7b.name]: ChatRequest + [CODELLAMA_13b.name]: ChatRequest + [CODELLAMA_34b.name]: ChatRequest + [CODELLAMA_70b.name]: ChatRequest +} + +export type CodellamaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [CODELLAMA_LATEST.name]: typeof CODELLAMA_LATEST.supports.input + [CODELLAMA_7b.name]: typeof CODELLAMA_7b.supports.input + [CODELLAMA_13b.name]: typeof CODELLAMA_13b.supports.input + [CODELLAMA_34b.name]: typeof CODELLAMA_34b.supports.input + [CODELLAMA_70b.name]: typeof CODELLAMA_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts new file mode 100644 index 00000000..84971561 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const COMMAND_R_PLUS_LATEST = { + name: 'command-r-plus:latest', + supports:
{ + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '59gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const COMMAND_R_PLUS_104b = { + name: 'command-r-plus:104b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '59gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const COMMAND_R_PLUS_MODELS = [ + COMMAND_R_PLUS_LATEST.name, + COMMAND_R_PLUS_104b.name, +] as const + +// const COMMAND_R_PLUS_IMAGE_MODELS = [] as const + +// export const COMMAND_R_PLUS_EMBEDDING_MODELS = [] as const + +// const COMMAND_R_PLUS_AUDIO_MODELS = [] as const + +// const COMMAND_R_PLUS_VIDEO_MODELS = [] as const + +// export type CommandRChatModels = (typeof COMMAND_R_PLUS_MODELS)[number] + +// Manual type map for per-model provider options +export type CommandRPlusChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_PLUS_LATEST.name]: ChatRequest + [COMMAND_R_PLUS_104b.name]: ChatRequest +} + +export type CommandRPlusModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_PLUS_LATEST.name]: typeof COMMAND_R_PLUS_LATEST.supports.input + [COMMAND_R_PLUS_104b.name]: typeof COMMAND_R_PLUS_104b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts new file mode 100644 index 00000000..a4f47e07 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const COMMAND_R_LATEST = { + name: 'command-r:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '19gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const COMMAND_R_35b = { + name: 'command-r:35b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '19gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const COMMAND_R_MODELS = [ + COMMAND_R_LATEST.name, + COMMAND_R_35b.name, +] as const + +// const COMMAND_R_IMAGE_MODELS = [] as const + +// export const COMMAND_R_EMBEDDING_MODELS = [] as const + +// const COMMAND_R_AUDIO_MODELS = [] as const + +// const COMMAND_R_VIDEO_MODELS = [] as const + +// export type CommandRChatModels = (typeof COMMAND_R_MODELS)[number] + +// Manual type map for per-model provider options +export type CommandRChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_LATEST.name]: ChatRequest + [COMMAND_R_35b.name]: ChatRequest +} + +export type CommandRModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_LATEST.name]: typeof COMMAND_R_LATEST.supports.input + [COMMAND_R_35b.name]: typeof COMMAND_R_35b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts new file mode 100644 index 00000000..e510d404 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const COMMAND_R_7b_LATEST = { + name: 'command-r7b:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: 
['tools'], + }, + size: '5.1gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const COMMAND_R_7b_7b = { + name: 'command-r7b:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5.1gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const COMMAND_R_7b_MODELS = [ + COMMAND_R_7b_LATEST.name, + COMMAND_R_7b_7b.name, +] as const + +// const COMMAND_R_7b_IMAGE_MODELS = [] as const + +// export const COMMAND_R_7b_EMBEDDING_MODELS = [] as const + +// const COMMAND_R_7b_AUDIO_MODELS = [] as const + +// const COMMAND_R_7b_VIDEO_MODELS = [] as const + +// export type CommandR7bChatModels = (typeof COMMAND_R_7b_MODELS)[number] + +// Manual type map for per-model provider options +export type CommandR7bChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [COMMAND_R_7b_LATEST.name]: ChatRequest + [COMMAND_R_7b_7b.name]: ChatRequest +} + +export type CommandR7bModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [COMMAND_R_7b_LATEST.name]: typeof COMMAND_R_7b_LATEST.supports.input + [COMMAND_R_7b_7b.name]: typeof COMMAND_R_7b_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts new file mode 100644 index 00000000..eada87a9 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_CODER_V2_LATEST = { + name: 'deepseek-coder-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.9gb', + context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_CODER_V2_16b = { + name: 'deepseek-coder-v2:16b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.9gb', + context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_CODER_V2_236b = { + name: 'deepseek-coder-v2:236b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '133gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_CODER_V2_MODELS = [ + DEEPSEEK_CODER_V2_LATEST.name, + DEEPSEEK_CODER_V2_16b.name, + DEEPSEEK_CODER_V2_236b.name, +] as const + +// const DEEPSEEK_CODER_V2_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_CODER_V2_EMBEDDING_MODELS = [] as const + +// const DEEPSEEK_CODER_V2_AUDIO_MODELS = [] as const + +// const DEEPSEEK_CODER_V2_VIDEO_MODELS = [] as const + +// export type DeepseekCoderV2ChatModels = (typeof DEEPSEEK_CODER_V2_MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekCoderV2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_CODER_V2_LATEST.name]: ChatRequest + [DEEPSEEK_CODER_V2_16b.name]: ChatRequest + [DEEPSEEK_CODER_V2_236b.name]: ChatRequest +} + +export type DeepseekCoderV2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_CODER_V2_LATEST.name]: typeof DEEPSEEK_CODER_V2_LATEST.supports.input + [DEEPSEEK_CODER_V2_16b.name]: typeof DEEPSEEK_CODER_V2_16b.supports.input + [DEEPSEEK_CODER_V2_236b.name]: typeof DEEPSEEK_CODER_V2_236b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts
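For orientation while reading these metadata files: every literal is checked with satisfies DefaultOllamaModelMeta, whose definition lives in models-meta.ts (added by this patch but not excerpted in this section). The following is a sketch only, inferred from how the literals in this diff use the type; the modality and capability unions are reconstructed from the values that actually appear here and may be narrower than the real definition.

// Sketch only: inferred from usage in this patch, not the canonical definition.
type OllamaModality = 'text' | 'image' // further members may exist upstream
type OllamaCapability = 'tools' | 'thinking' | 'vision'

interface DefaultOllamaModelMeta {
  name: string // Ollama tag, e.g. 'deepseek-coder-v2:16b'
  supports: {
    input: readonly OllamaModality[]
    output: readonly OllamaModality[]
    capabilities: readonly OllamaCapability[]
  }
  size: string // human-readable download size, e.g. '8.9gb'
  context: number // context window in tokens, e.g. 160_000
}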
b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts new file mode 100644 index 00000000..e3fa0d6c --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts @@ -0,0 +1,53 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_OCR_LATEST = { + name: 'deepseek-ocr:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '6.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_OCR_3b = { + name: 'deepseek-ocr:3b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + + size: '6.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_OCR_MODELS = [ + DEEPSEEK_OCR_LATEST.name, + DEEPSEEK_OCR_3b.name, +] as const + +// export const DEEPSEEK_OCR_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_OCR_EMBEDDING_MODELS = [] as const + +// export const DEEPSEEK_OCR_AUDIO_MODELS = [] as const + +// export const DEEPSEEK_OCR_VIDEO_MODELS = [] as const + +// export type DeepseekOcrChatModels = (typeof DEEPSEEK_OCR__MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekOcrChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_OCR_LATEST.name]: ChatRequest + [DEEPSEEK_OCR_3b.name]: ChatRequest +} + +export type DeepseekOcrModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_OCR_LATEST.name]: typeof DEEPSEEK_OCR_LATEST.supports.input + [DEEPSEEK_OCR_3b.name]: typeof DEEPSEEK_OCR_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts new file mode 100644 index 00000000..470efd36 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts @@ -0,0 +1,122 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_R1_LATEST = { + name: 'deepseek-r1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_1_5b = { + name: 'deepseek-r1:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '1.1gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_7b = { + name: 'deepseek-r1:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '4.7gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_8b = { + name: 'deepseek-r1:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_32b = { + name: 'deepseek-r1:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '20gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_R1_70b = { + name: 'deepseek-r1:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const 
DEEPSEEK_R1_671b = { + name: 'deepseek-r1:671b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_R1_MODELS = [ + DEEPSEEK_R1_LATEST.name, + DEEPSEEK_R1_1_5b.name, + DEEPSEEK_R1_7b.name, + DEEPSEEK_R1_8b.name, + DEEPSEEK_R1_32b.name, + DEEPSEEK_R1_70b.name, + DEEPSEEK_R1_671b.name, +] as const + +// const DEEPSEEK_R1_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_R1_EMBEDDING_MODELS = [] as const + +// const DEEPSEEK_R1_AUDIO_MODELS = [] as const + +// const DEEPSEEK_R1_VIDEO_MODELS = [] as const + +// export type DeepseekR1ChatModels = (typeof DEEPSEEK_R1_MODELS)[number] + +// Manual type map for per-model provider options +export type DeepseekR1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEEPSEEK_R1_LATEST.name]: ChatRequest + [DEEPSEEK_R1_1_5b.name]: ChatRequest + [DEEPSEEK_R1_7b.name]: ChatRequest + [DEEPSEEK_R1_8b.name]: ChatRequest + [DEEPSEEK_R1_32b.name]: ChatRequest + [DEEPSEEK_R1_70b.name]: ChatRequest + [DEEPSEEK_R1_671b.name]: ChatRequest +} + +export type DeepseekR1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_R1_LATEST.name]: typeof DEEPSEEK_R1_LATEST.supports.input + [DEEPSEEK_R1_1_5b.name]: typeof DEEPSEEK_R1_1_5b.supports.input + [DEEPSEEK_R1_7b.name]: typeof DEEPSEEK_R1_7b.supports.input + [DEEPSEEK_R1_8b.name]: typeof DEEPSEEK_R1_8b.supports.input + [DEEPSEEK_R1_32b.name]: typeof DEEPSEEK_R1_32b.supports.input + [DEEPSEEK_R1_70b.name]: typeof DEEPSEEK_R1_70b.supports.input + [DEEPSEEK_R1_671b.name]: typeof DEEPSEEK_R1_671b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts new file mode 100644 index 00000000..413413ed --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts @@ -0,0 +1,67 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEEPSEEK_V3_1_LATEST = { + name: 'deepseek-v3.1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_V3_1_671b = { + name: 'deepseek-v3.1:671b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + + size: '404gb', + context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +const DEEPSEEK_V3_1_671b_cloud = { + name: 'deepseek-v3.1:671b-cloud', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '404gb', + context: 160_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEEPSEEK_V3_1_MODELS = [ + DEEPSEEK_V3_1_LATEST.name, + DEEPSEEK_V3_1_671b.name, + DEEPSEEK_V3_1_671b_cloud.name, +] as const + +// export const DEEPSEEK_V3_1_IMAGE_MODELS = [] as const + +// export const DEEPSEEK_V3_1_EMBEDDING_MODELS = [] as const + +// export const DEEPSEEK_V3_1_AUDIO_MODELS = [] as const + +// export const DEEPSEEK_V3_1_VIDEO_MODELS = [] as const + +// export type DeepseekV3_1ChatModels = (typeof DEEPSEEK_V3_1_MODELS)[number] + +// Manual type map for per-model provider options +export type Deepseekv3_1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support +
[DEEPSEEK_V3_1_LATEST.name]: ChatRequest + [DEEPSEEK_V3_1_671b.name]: ChatRequest + [DEEPSEEK_V3_1_671b_cloud.name]: ChatRequest +} + +export type Deepseekv3_1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEEPSEEK_V3_1_LATEST.name]: typeof DEEPSEEK_V3_1_LATEST.supports.input + [DEEPSEEK_V3_1_671b.name]: typeof DEEPSEEK_V3_1_671b.supports.input + [DEEPSEEK_V3_1_671b_cloud.name]: typeof DEEPSEEK_V3_1_671b_cloud.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts new file mode 100644 index 00000000..246729ca --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DEVSTRAL_LATEST = { + name: 'devstral:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '14gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DEVSTRAL_24b = { + name: 'devstral:24b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '14gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const DEVSTRAL_MODELS = [ + DEVSTRAL_LATEST.name, + DEVSTRAL_24b.name, +] as const + +// const DEVSTRAL_IMAGE_MODELS = [] as const + +// export const DEVSTRAL_EMBEDDING_MODELS = [] as const + +// const DEVSTRAL_AUDIO_MODELS = [] as const + +// const DEVSTRAL_VIDEO_MODELS = [] as const + +// export type DevstralChatModels = (typeof DEVSTRAL_MODELS)[number] + +// Manual type map for per-model provider options +export type DevstralChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DEVSTRAL_LATEST.name]: ChatRequest + [DEVSTRAL_24b.name]: ChatRequest +} + +export type DevstralModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DEVSTRAL_LATEST.name]: typeof DEVSTRAL_LATEST.supports.input + [DEVSTRAL_24b.name]: typeof DEVSTRAL_24b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts new file mode 100644 index 00000000..f45d44b4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const DOLPHIN3_LATEST = { + name: 'dolphin3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const DOLPHIN3_8b = { + name: 'dolphin3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const DOLPHIN3_MODELS = [DOLPHIN3_LATEST.name, DOLPHIN3_8b.name] as const + +// const DOLPHIN3_IMAGE_MODELS = [] as const + +// export const DOLPHIN3_EMBEDDING_MODELS = [] as const + +// const DOLPHIN3_AUDIO_MODELS = [] as const + +// const DOLPHIN3_VIDEO_MODELS = [] as const + +// export type Dolphin3ChatModels = (typeof DOLPHIN3_MODELS)[number] + +// Manual type map for per-model provider options +export type Dolphin3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [DOLPHIN3_LATEST.name]: ChatRequest + [DOLPHIN3_8b.name]: ChatRequest 
+} + +export type Dolphin3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [DOLPHIN3_LATEST.name]: typeof DOLPHIN3_LATEST.supports.input + [DOLPHIN3_8b.name]: typeof DOLPHIN3_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts new file mode 100644 index 00000000..131f57c1 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const EXAONE3_5_LATEST = { + name: 'exaone3.5:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const EXAONE3_5_2_4b = { + name: 'exaone3.5:2.4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const EXAONE3_5_7_8b = { + name: 'exaone3.5:7.8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.8gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const EXAONE3_5_32b = { + name: 'exaone3.5:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '19gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const EXAONE3_5MODELS = [ + EXAONE3_5_LATEST.name, + EXAONE3_5_2_4b.name, + EXAONE3_5_7_8b.name, + EXAONE3_5_32b.name, +] as const + +// const EXAONE3_5IMAGE_MODELS = [] as const + +// export const EXAONE3_5EMBEDDING_MODELS = [] as const + +// const EXAONE3_5AUDIO_MODELS = [] as const + +// const EXAONE3_5VIDEO_MODELS = [] as const + +// export type Exaone3_5ChatModels = (typeof EXAONE3_5MODELS)[number] + +// Manual type map for per-model provider options +export type Exaone3_5ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [EXAONE3_5_LATEST.name]: ChatRequest + [EXAONE3_5_2_4b.name]: ChatRequest + [EXAONE3_5_7_8b.name]: ChatRequest + [EXAONE3_5_32b.name]: ChatRequest +} + +export type Exaone3_5ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [EXAONE3_5_LATEST.name]: typeof EXAONE3_5_LATEST.supports.input + [EXAONE3_5_2_4b.name]: typeof EXAONE3_5_2_4b.supports.input + [EXAONE3_5_7_8b.name]: typeof EXAONE3_5_7_8b.supports.input + [EXAONE3_5_32b.name]: typeof EXAONE3_5_32b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts new file mode 100644 index 00000000..f353b2f4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const FALCON2_LATEST = { + name: 'falcon2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.4gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON2_11b = { + name: 'falcon2:11b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.4gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +export const FALCON2_MODELS = [FALCON2_LATEST.name, FALCON2_11b.name] as const + +// const FALCON2_IMAGE_MODELS = [] as const + +// export const FALCON2_EMBEDDING_MODELS =
[] as const + +// const FALCON2_AUDIO_MODELS = [] as const + +// const FALCON2_VIDEO_MODELS = [] as const + +// export type Falcon2ChatModels = (typeof FALCON2_MODELS)[number] + +// Manual type map for per-model provider options +export type Falcon2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FALCON2_LATEST.name]: ChatRequest + [FALCON2_11b.name]: ChatRequest +} + +export type Falcon2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FALCON2_LATEST.name]: typeof FALCON2_LATEST.supports.input + [FALCON2_11b.name]: typeof FALCON2_11b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts new file mode 100644 index 00000000..50e15cee --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts @@ -0,0 +1,94 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const FALCON3_LATEST = { + name: 'falcon3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.6gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON3_1b = { + name: 'falcon3:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON3_3b = { + name: 'falcon3:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON3_7b = { + name: 'falcon3:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.6gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const FALCON3_10b = { + name: 'falcon3:10b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '6.3gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const FALCON3_MODELS = [ + FALCON3_LATEST.name, + FALCON3_1b.name, + FALCON3_3b.name, + FALCON3_7b.name, + FALCON3_10b.name, +] as const + +// const FALCON3_IMAGE_MODELS = [] as const + +// export const FALCON3_EMBEDDING_MODELS = [] as const + +// const FALCON3_AUDIO_MODELS = [] as const + +// const FALCON3_VIDEO_MODELS = [] as const + +// export type Falcon3ChatModels = (typeof FALCON3_MODELS)[number] + +// Manual type map for per-model provider options +export type Falcon3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FALCON3_LATEST.name]: ChatRequest + [FALCON3_1b.name]: ChatRequest + [FALCON3_3b.name]: ChatRequest + [FALCON3_7b.name]: ChatRequest + [FALCON3_10b.name]: ChatRequest +} + +export type Falcon3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FALCON3_LATEST.name]: typeof FALCON3_LATEST.supports.input + [FALCON3_1b.name]: typeof FALCON3_1b.supports.input + [FALCON3_3b.name]: typeof FALCON3_3b.supports.input + [FALCON3_7b.name]: typeof FALCON3_7b.supports.input + [FALCON3_10b.name]: typeof FALCON3_10b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts new file mode 100644 index 00000000..517616a4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 
'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const FIREFUNCTION_V2_LATEST = { + name: 'firefunction-v2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const FIREFUNCTION_V2_70b = { + name: 'firefunction-v2:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '40gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const FIREFUNCTION_V2_MODELS = [ + FIREFUNCTION_V2_LATEST.name, + FIREFUNCTION_V2_70b.name, +] as const + +// const FIREFUNCTION_V2_IMAGE_MODELS = [] as const + +// export const FIREFUNCTION_V2_EMBEDDING_MODELS = [] as const + +// const FIREFUNCTION_V2_AUDIO_MODELS = [] as const + +// const FIREFUNCTION_V2_VIDEO_MODELS = [] as const + +// export type Firefunction_V2ChatModels = (typeof FIREFUNCTION_V2_MODELS)[number] + +// Manual type map for per-model provider options +export type Firefunction_V2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [FIREFUNCTION_V2_LATEST.name]: ChatRequest + [FIREFUNCTION_V2_70b.name]: ChatRequest +} + +export type Firefunction_V2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [FIREFUNCTION_V2_LATEST.name]: typeof FIREFUNCTION_V2_LATEST.supports.input + [FIREFUNCTION_V2_70b.name]: typeof FIREFUNCTION_V2_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts new file mode 100644 index 00000000..b1b66f02 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GEMMA_LATEST = { + name: 'gemma:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA_2b = { + name: 'gemma:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA_7b = { + name: 'gemma:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const GEMMA_MODELS = [ + GEMMA_LATEST.name, + GEMMA_2b.name, + GEMMA_7b.name, +] as const + +// const GEMMA_IMAGE_MODELS = [] as const + +// export const GEMMA_EMBEDDING_MODELS = [] as const + +// const GEMMA_AUDIO_MODELS = [] as const + +// const GEMMA_VIDEO_MODELS = [] as const + +// export type GemmaChatModels = (typeof GEMMA_MODELS)[number] + +// Manual type map for per-model provider options +export type GemmaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA_LATEST.name]: ChatRequest + [GEMMA_2b.name]: ChatRequest + [GEMMA_7b.name]: ChatRequest +} + +export type GemmaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA_LATEST.name]: typeof GEMMA_LATEST.supports.input + [GEMMA_2b.name]: typeof GEMMA_2b.supports.input + [GEMMA_7b.name]: typeof GEMMA_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts new file mode 100644 index 00000000..b5b594a8 --- /dev/null +++ 
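A note on intent, since the pattern repeats in every file: the ...ChatModelProviderOptionsByName maps let a caller constrain request options to one specific model tag by indexing the map with the tag's literal type. A minimal consumption sketch, assuming the import path below and a hypothetical chat helper (neither is part of this patch):

import type { Falcon3ChatModelProviderOptionsByName } from './meta/model-meta-falcon3'

// Hypothetical helper: the options type is looked up per model tag.
function chat<M extends keyof Falcon3ChatModelProviderOptionsByName>(
  model: M,
  options: Falcon3ChatModelProviderOptionsByName[M],
): void {
  // ...forward model and options to the ollama client here
}

// Every falcon3 entry currently maps to ChatRequest, so this type-checks:
chat('falcon3:7b', { model: 'falcon3:7b', messages: [] })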
b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts @@ -0,0 +1,80 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GEMMA2_LATEST = { + name: 'gemma2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.4gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA2_2b = { + name: 'gemma2:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA2_9b = { + name: 'gemma2:9b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.4gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA2_27b = { + name: 'gemma2:27b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '16gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const GEMMA2_MODELS = [ + GEMMA2_LATEST.name, + GEMMA2_2b.name, + GEMMA2_9b.name, + GEMMA2_27b.name, +] as const + +// const GEMMA2_IMAGE_MODELS = [] as const + +// export const GEMMA2_EMBEDDING_MODELS = [] as const + +// const GEMMA2_AUDIO_MODELS = [] as const + +// const GEMMA2_VIDEO_MODELS = [] as const + +// export type Gemma2ChatModels = (typeof GEMMA2_MODELS)[number] + +// Manual type map for per-model provider options +export type Gemma2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA2_LATEST.name]: ChatRequest + [GEMMA2_2b.name]: ChatRequest + [GEMMA2_9b.name]: ChatRequest + [GEMMA2_27b.name]: ChatRequest +} + +export type Gemma2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA2_LATEST.name]: typeof GEMMA2_LATEST.supports.input + [GEMMA2_2b.name]: typeof GEMMA2_2b.supports.input + [GEMMA2_9b.name]: typeof GEMMA2_9b.supports.input + [GEMMA2_27b.name]: typeof GEMMA2_27b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts new file mode 100644 index 00000000..e10daf25 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts @@ -0,0 +1,108 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GEMMA3_LATEST = { + name: 'gemma3:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '3.3gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_270m = { + name: 'gemma3:270m', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '298mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_1b = { + name: 'gemma3:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '815mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_4b = { + name: 'gemma3:4b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '3.3gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_12b = { + name: 'gemma3:12b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: [], + }, + size: '8.1gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GEMMA3_27b = { + name: 'gemma3:27b', + supports: { + input: ['text', 'image'], + output: 
['text'], + capabilities: [], + }, + size: '17gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const GEMMA3_MODELS = [ + GEMMA3_LATEST.name, + GEMMA3_270m.name, + GEMMA3_1b.name, + GEMMA3_4b.name, + GEMMA3_12b.name, + GEMMA3_27b.name, +] as const + +// const GEMMA3_IMAGE_MODELS = [] as const + +// export const GEMMA3_EMBEDDING_MODELS = [] as const + +// const GEMMA3_AUDIO_MODELS = [] as const + +// const GEMMA3_VIDEO_MODELS = [] as const + +// export type Gemma3ChatModels = (typeof GEMMA3_MODELS)[number] + +// Manual type map for per-model provider options +export type Gemma3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GEMMA3_LATEST.name]: ChatRequest + [GEMMA3_270m.name]: ChatRequest + [GEMMA3_1b.name]: ChatRequest + [GEMMA3_4b.name]: ChatRequest + [GEMMA3_12b.name]: ChatRequest + [GEMMA3_27b.name]: ChatRequest +} + +export type Gemma3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GEMMA3_LATEST.name]: typeof GEMMA3_LATEST.supports.input + [GEMMA3_270m.name]: typeof GEMMA3_270m.supports.input + [GEMMA3_1b.name]: typeof GEMMA3_1b.supports.input + [GEMMA3_4b.name]: typeof GEMMA3_4b.supports.input + [GEMMA3_12b.name]: typeof GEMMA3_12b.supports.input + [GEMMA3_27b.name]: typeof GEMMA3_27b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts new file mode 100644 index 00000000..6f28a433 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_DENSE_LATEST = { + name: 'granite3-dense:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_DENSE_2b = { + name: 'granite3-dense:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.6gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_DENSE_8b = { + name: 'granite3-dense:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_DENSE_MODELS = [ + GRANITE3_DENSE_LATEST.name, + GRANITE3_DENSE_2b.name, + GRANITE3_DENSE_8b.name, +] as const + +// const GRANITE3_DENSE_IMAGE_MODELS = [] as const + +// export const GRANITE3_DENSE_EMBEDDING_MODELS = [] as const + +// const GRANITE3_DENSE_AUDIO_MODELS = [] as const + +// const GRANITE3_DENSE_VIDEO_MODELS = [] as const + +// export type Granite3Dense3ChatModels = (typeof GRANITE3_DENSE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3DenseChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_DENSE_LATEST.name]: ChatRequest + [GRANITE3_DENSE_2b.name]: ChatRequest + [GRANITE3_DENSE_8b.name]: ChatRequest +} + +export type Granite3DenseModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_DENSE_LATEST.name]: typeof GRANITE3_DENSE_LATEST.supports.input + [GRANITE3_DENSE_2b.name]: typeof GRANITE3_DENSE_2b.supports.input + [GRANITE3_DENSE_8b.name]: typeof GRANITE3_DENSE_8b.supports.input +} diff --git 
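The companion ...ModelInputModalitiesByName maps play the same role for inputs: because each supports.input is a readonly tuple of literal types, downstream code can check a modality at compile time. An illustrative sketch against the gemma3 map above (the AcceptsImage alias is an assumption for illustration, not part of this patch):

import type { Gemma3ModelInputModalitiesByName } from './meta/model-meta-gemma3'

// Resolves to true only for tags whose supports.input tuple includes 'image'.
type AcceptsImage<M extends keyof Gemma3ModelInputModalitiesByName> =
  'image' extends Gemma3ModelInputModalitiesByName[M][number] ? true : false

type VisionOk = AcceptsImage<'gemma3:4b'> // true: input is ['text', 'image']
type VisionNo = AcceptsImage<'gemma3:1b'> // false: input is ['text']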
a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts new file mode 100644 index 00000000..798118cb --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_GUARDIAN_LATEST = { + name: 'granite3-guardian:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_GUARDIAN_2b = { + name: 'granite3-guardian:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_GUARDIAN_8b = { + name: 'granite3-guardian:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '5.8gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_GUARDIAN_MODELS = [ + GRANITE3_GUARDIAN_LATEST.name, + GRANITE3_GUARDIAN_2b.name, + GRANITE3_GUARDIAN_8b.name, +] as const + +// const GRANITE3_GUARDIAN_IMAGE_MODELS = [] as const + +// export const GRANITE3_GUARDIAN_EMBEDDING_MODELS = [] as const + +// const GRANITE3_GUARDIAN_AUDIO_MODELS = [] as const + +// const GRANITE3_GUARDIAN_VIDEO_MODELS = [] as const + +// export type GraniteGuardian3ChatModels = (typeof GRANITE3_GUARDIAN_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3GuardianChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_GUARDIAN_LATEST.name]: ChatRequest + [GRANITE3_GUARDIAN_2b.name]: ChatRequest + [GRANITE3_GUARDIAN_8b.name]: ChatRequest +} + +export type Granite3GuardianModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_GUARDIAN_LATEST.name]: typeof GRANITE3_GUARDIAN_LATEST.supports.input + [GRANITE3_GUARDIAN_2b.name]: typeof GRANITE3_GUARDIAN_2b.supports.input + [GRANITE3_GUARDIAN_8b.name]: typeof GRANITE3_GUARDIAN_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts new file mode 100644 index 00000000..4d43bf2d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_MOE_LATEST = { + name: 'granite3-moe:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '822mb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_MOE_1b = { + name: 'granite3-moe:1b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '822mb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_MOE_3b = { + name: 'granite3-moe:3b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.1gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_MOE_MODELS = [ + GRANITE3_MOE_LATEST.name, + GRANITE3_MOE_1b.name, + GRANITE3_MOE_3b.name, +] as const + +// const GRANITE3_MOE_IMAGE_MODELS = [] as const + +// export const GRANITE3_MOE_EMBEDDING_MODELS = [] as const + +// const
GRANITE3_MOE_AUDIO_MODELS = [] as const + +// const GRANITE3_MOE_VIDEO_MODELS = [] as const + +// export type GraniteMoe3ChatModels = (typeof GRANITE3_MOE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3MoeChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_MOE_LATEST.name]: ChatRequest + [GRANITE3_MOE_1b.name]: ChatRequest + [GRANITE3_MOE_3b.name]: ChatRequest +} + +export type Granite3MoeModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_MOE_LATEST.name]: typeof GRANITE3_MOE_LATEST.supports.input + [GRANITE3_MOE_1b.name]: typeof GRANITE3_MOE_1b.supports.input + [GRANITE3_MOE_3b.name]: typeof GRANITE3_MOE_3b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts new file mode 100644 index 00000000..2dbf7374 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_1_DENSE_LATEST = { + name: 'granite3.1-dense:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_1_DENSE_2b = { + name: 'granite3.1-dense:2b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '1.6gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const GRANITE3_1_DENSE_8b = { + name: 'granite3.1-dense:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '5gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const GRANITE3_1_DENSE_MODELS = [ + GRANITE3_1_DENSE_LATEST.name, + GRANITE3_1_DENSE_2b.name, + GRANITE3_1_DENSE_8b.name, +] as const + +// const GRANITE3_1_DENSE_IMAGE_MODELS = [] as const + +// export const GRANITE3_1_DENSE_EMBEDDING_MODELS = [] as const + +// const GRANITE3_1_DENSE_AUDIO_MODELS = [] as const + +// const GRANITE3_1_DENSE_VIDEO_MODELS = [] as const + +// export type Granite3_1Dense3ChatModels = (typeof GRANITE3_1_DENSE_MODELS)[number] + +// Manual type map for per-model provider options +export type Granite3_1DenseChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [GRANITE3_1_DENSE_LATEST.name]: ChatRequest + [GRANITE3_1_DENSE_2b.name]: ChatRequest + [GRANITE3_1_DENSE_8b.name]: ChatRequest +} + +export type Granite3_1DenseModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [GRANITE3_1_DENSE_LATEST.name]: typeof GRANITE3_1_DENSE_LATEST.supports.input + [GRANITE3_1_DENSE_2b.name]: typeof GRANITE3_1_DENSE_2b.supports.input + [GRANITE3_1_DENSE_8b.name]: typeof GRANITE3_1_DENSE_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts new file mode 100644 index 00000000..7d513967 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const GRANITE3_1_MOE_LATEST = { + name: 'granite3.1-moe:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: 
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts
new file mode 100644
index 00000000..7d513967
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const GRANITE3_1_MOE_LATEST = {
+  name: 'granite3.1-moe:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '2gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const GRANITE3_1_MOE_1b = {
+  name: 'granite3.1-moe:1b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '1.4gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const GRANITE3_1_MOE_3b = {
+  name: 'granite3.1-moe:3b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '2gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const GRANITE3_1_MOE_MODELS = [
+  GRANITE3_1_MOE_LATEST.name,
+  GRANITE3_1_MOE_1b.name,
+  GRANITE3_1_MOE_3b.name,
+] as const
+
+// const GRANITE3_1_MOE_IMAGE_MODELS = [] as const
+
+// export const GRANITE3_1_MOE_EMBEDDING_MODELS = [] as const
+
+// const GRANITE3_1_MOE_AUDIO_MODELS = [] as const
+
+// const GRANITE3_1_MOE_VIDEO_MODELS = [] as const
+
+// export type Granite3_1MoeChatModels = (typeof GRANITE3_1_MOE_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Granite3_1MoeChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [GRANITE3_1_MOE_LATEST.name]: ChatRequest
+  [GRANITE3_1_MOE_1b.name]: ChatRequest
+  [GRANITE3_1_MOE_3b.name]: ChatRequest
+}
+
+export type Granite3_1MoeModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [GRANITE3_1_MOE_LATEST.name]: typeof GRANITE3_1_MOE_LATEST.supports.input
+  [GRANITE3_1_MOE_1b.name]: typeof GRANITE3_1_MOE_1b.supports.input
+  [GRANITE3_1_MOE_3b.name]: typeof GRANITE3_1_MOE_3b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts
new file mode 100644
index 00000000..db18d06d
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA_GUARD3_LATEST = {
+  name: 'llama-guard3:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.9gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA_GUARD3_1b = {
+  name: 'llama-guard3:1b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '1.6gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA_GUARD3_8b = {
+  name: 'llama-guard3:8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.9gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA_GUARD3_MODELS = [
+  LLAMA_GUARD3_LATEST.name,
+  LLAMA_GUARD3_1b.name,
+  LLAMA_GUARD3_8b.name,
+] as const
+
+// const LLAMA_GUARD3_IMAGE_MODELS = [] as const
+
+// export const LLAMA_GUARD3_EMBEDDING_MODELS = [] as const
+
+// const LLAMA_GUARD3_AUDIO_MODELS = [] as const
+
+// const LLAMA_GUARD3_VIDEO_MODELS = [] as const
+
+// export type LlamaGuard3ChatModels = (typeof LLAMA_GUARD3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type LlamaGuard3ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA_GUARD3_LATEST.name]: ChatRequest
+  [LLAMA_GUARD3_1b.name]: ChatRequest
+  [LLAMA_GUARD3_8b.name]: ChatRequest
+}
+
+export type LlamaGuard3ModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [LLAMA_GUARD3_LATEST.name]: typeof LLAMA_GUARD3_LATEST.supports.input
+  [LLAMA_GUARD3_1b.name]: typeof LLAMA_GUARD3_1b.supports.input
+  [LLAMA_GUARD3_8b.name]: typeof LLAMA_GUARD3_8b.supports.input
+}
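Reviewer note: llama-guard3 is a safety classifier rather than an assistant, so its empty `capabilities` list is expected. A hedged usage sketch; the `safe`/`unsafe` reply convention is an assumption to verify against the model card:

```ts
import ollama from 'ollama'

// Returns true when the guard model labels the text safe.
async function moderate(text: string): Promise<boolean> {
  const res = await ollama.chat({
    model: 'llama-guard3:1b',
    messages: [{ role: 'user', content: text }],
  })
  // Llama Guard conventionally answers "safe" or "unsafe\n<category>".
  return res.message.content.trim().toLowerCase().startsWith('safe')
}
```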
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts
new file mode 100644
index 00000000..44a9c66d
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts
@@ -0,0 +1,80 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA2_LATEST = {
+  name: 'llama2:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '3.8gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA2_7b = {
+  name: 'llama2:7b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '3.8gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA2_13b = {
+  name: 'llama2:13b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '7.4gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA2_70b = {
+  name: 'llama2:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '39gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA2_MODELS = [
+  LLAMA2_LATEST.name,
+  LLAMA2_7b.name,
+  LLAMA2_13b.name,
+  LLAMA2_70b.name,
+] as const
+
+// const LLAMA2_IMAGE_MODELS = [] as const
+
+// export const LLAMA2_EMBEDDING_MODELS = [] as const
+
+// const LLAMA2_AUDIO_MODELS = [] as const
+
+// const LLAMA2_VIDEO_MODELS = [] as const
+
+// export type Llama2ChatModels = (typeof LLAMA2_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama2ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA2_LATEST.name]: ChatRequest
+  [LLAMA2_7b.name]: ChatRequest
+  [LLAMA2_13b.name]: ChatRequest
+  [LLAMA2_70b.name]: ChatRequest
+}
+
+export type Llama2ModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [LLAMA2_LATEST.name]: typeof LLAMA2_LATEST.supports.input
+  [LLAMA2_7b.name]: typeof LLAMA2_7b.supports.input
+  [LLAMA2_13b.name]: typeof LLAMA2_13b.supports.input
+  [LLAMA2_70b.name]: typeof LLAMA2_70b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts
new file mode 100644
index 00000000..58063a03
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA3_CHATQA_LATEST = {
+  name: 'llama3-chatqa:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.7gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_CHATQA_8b = {
+  name: 'llama3-chatqa:8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.7gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_CHATQA_70b = {
+  name: 'llama3-chatqa:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '40gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA3_CHATQA_MODELS = [
+  LLAMA3_CHATQA_LATEST.name,
+  LLAMA3_CHATQA_8b.name,
+  LLAMA3_CHATQA_70b.name,
+] as const
+
+// const LLAMA3_CHATQA_IMAGE_MODELS = [] as const
+
+// export const LLAMA3_CHATQA_EMBEDDING_MODELS = [] as const
+
+// const LLAMA3_CHATQA_AUDIO_MODELS = [] as const
+
+// const LLAMA3_CHATQA_VIDEO_MODELS = [] as const
+
+// export type Llama3ChatQaChatModels = (typeof LLAMA3_CHATQA_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3ChatQaChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA3_CHATQA_LATEST.name]: ChatRequest
+  [LLAMA3_CHATQA_8b.name]: ChatRequest
+  [LLAMA3_CHATQA_70b.name]: ChatRequest
+}
+
+export type Llama3ChatQaModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [LLAMA3_CHATQA_LATEST.name]: typeof LLAMA3_CHATQA_LATEST.supports.input
+  [LLAMA3_CHATQA_8b.name]: typeof LLAMA3_CHATQA_8b.supports.input
+  [LLAMA3_CHATQA_70b.name]: typeof LLAMA3_CHATQA_70b.supports.input
+}
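Reviewer note: a sketch of what the `...ProviderOptionsByName` maps enable — a wrapper whose options type follows from the model tag. `sendChat` and the import path are hypothetical; only the map shape comes from this patch:

```ts
import ollama from 'ollama'
import type { Llama3ChatQaChatModelProviderOptionsByName } from './meta/model-meta-llama3-chatqa'

async function sendChat<M extends keyof Llama3ChatQaChatModelProviderOptionsByName>(
  model: M,
  options: Omit<Llama3ChatQaChatModelProviderOptionsByName[M], 'model' | 'stream'>,
) {
  // The map pins the options type for each tag (currently plain ChatRequest).
  return ollama.chat({ ...options, model, stream: false })
}

await sendChat('llama3-chatqa:8b', {
  messages: [{ role: 'user', content: 'hi' }],
})
```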
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts
new file mode 100644
index 00000000..8a658e6f
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA3_GRADIENT_LATEST = {
+  name: 'llama3-gradient:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.7gb',
+  context: 1_000_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_GRADIENT_8b = {
+  name: 'llama3-gradient:8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.7gb',
+  context: 1_000_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_GRADIENT_70b = {
+  name: 'llama3-gradient:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '40gb',
+  context: 1_000_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA3_GRADIENT_MODELS = [
+  LLAMA3_GRADIENT_LATEST.name,
+  LLAMA3_GRADIENT_8b.name,
+  LLAMA3_GRADIENT_70b.name,
+] as const
+
+// const LLAMA3_GRADIENT_IMAGE_MODELS = [] as const
+
+// export const LLAMA3_GRADIENT_EMBEDDING_MODELS = [] as const
+
+// const LLAMA3_GRADIENT_AUDIO_MODELS = [] as const
+
+// const LLAMA3_GRADIENT_VIDEO_MODELS = [] as const
+
+// export type Llama3GradientChatModels = (typeof LLAMA3_GRADIENT_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3GradientChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA3_GRADIENT_LATEST.name]: ChatRequest
+  [LLAMA3_GRADIENT_8b.name]: ChatRequest
+  [LLAMA3_GRADIENT_70b.name]: ChatRequest
+}
+
+export type Llama3GradientModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [LLAMA3_GRADIENT_LATEST.name]: typeof LLAMA3_GRADIENT_LATEST.supports.input
+  [LLAMA3_GRADIENT_8b.name]: typeof LLAMA3_GRADIENT_8b.supports.input
+  [LLAMA3_GRADIENT_70b.name]: typeof LLAMA3_GRADIENT_70b.supports.input
+}
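Reviewer note: llama3-gradient is the outlier here with a 1M-token trained window, but Ollama still starts models with a much smaller runtime context, so callers have to opt in per request. A sketch (the 256k value is illustrative; memory use grows with it):

```ts
import ollama from 'ollama'

const res = await ollama.chat({
  model: 'llama3-gradient:8b',
  messages: [{ role: 'user', content: 'Summarise this very long transcript…' }],
  // Raise the runtime window toward the trained limit recorded in `context`.
  options: { num_ctx: 256_000 },
})
console.log(res.message.content)
```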
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts
new file mode 100644
index 00000000..66186581
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts
@@ -0,0 +1,80 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA3_1_LATEST = {
+  name: 'llama3.1:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.9gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_1_8b = {
+  name: 'llama3.1:8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.9gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_1_70b = {
+  name: 'llama3.1:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '43gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_1_405b = {
+  name: 'llama3.1:405b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '243gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA3_1_MODELS = [
+  LLAMA3_1_LATEST.name,
+  LLAMA3_1_8b.name,
+  LLAMA3_1_70b.name,
+  LLAMA3_1_405b.name,
+] as const
+
+// const LLAMA3_1_IMAGE_MODELS = [] as const
+
+// export const LLAMA3_1_EMBEDDING_MODELS = [] as const
+
+// const LLAMA3_1_AUDIO_MODELS = [] as const
+
+// const LLAMA3_1_VIDEO_MODELS = [] as const
+
+// export type Llama3_1ChatModels = (typeof LLAMA3_1_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_1ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA3_1_LATEST.name]: ChatRequest
+  [LLAMA3_1_8b.name]: ChatRequest
+  [LLAMA3_1_70b.name]: ChatRequest
+  [LLAMA3_1_405b.name]: ChatRequest
+}
+
+export type Llama3_1ModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [LLAMA3_1_LATEST.name]: typeof LLAMA3_1_LATEST.supports.input
+  [LLAMA3_1_8b.name]: typeof LLAMA3_1_8b.supports.input
+  [LLAMA3_1_70b.name]: typeof LLAMA3_1_70b.supports.input
+  [LLAMA3_1_405b.name]: typeof LLAMA3_1_405b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts
new file mode 100644
index 00000000..d840815f
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA3_2_VISION_LATEST = {
+  name: 'llama3.2-vision:latest',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '7.8gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_2_VISION_11b = {
+  name: 'llama3.2-vision:11b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '7.8gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_2_VISION_90b = {
+  name: 'llama3.2-vision:90b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '55gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA3_2_VISION_MODELS = [
+  LLAMA3_2_VISION_LATEST.name,
+  LLAMA3_2_VISION_11b.name,
+  LLAMA3_2_VISION_90b.name,
+] as const
+
+// export const LLAMA3_2_VISION_IMAGE_MODELS = [] as const
+
+// export const LLAMA3_2_VISION_EMBEDDING_MODELS = [] as const
+
+// export const LLAMA3_2_VISION_AUDIO_MODELS = [] as const
+
+// export const LLAMA3_2_VISION_VIDEO_MODELS = [] as const
+
+// export type Llama3_2VisionChatModels = (typeof LLAMA3_2_VISION_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_2VisionChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA3_2_VISION_LATEST.name]: ChatRequest
+  [LLAMA3_2_VISION_11b.name]: ChatRequest
+  [LLAMA3_2_VISION_90b.name]: ChatRequest
+}
+
+export type Llama3_2VisionModelInputModalitiesByName = {
+  // Text and image input for every tag
+  [LLAMA3_2_VISION_LATEST.name]: typeof LLAMA3_2_VISION_LATEST.supports.input
+  [LLAMA3_2_VISION_11b.name]: typeof LLAMA3_2_VISION_11b.supports.input
+  [LLAMA3_2_VISION_90b.name]: typeof LLAMA3_2_VISION_90b.supports.input
+}
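Reviewer note: the `...InputModalitiesByName` maps make per-tag modality checks possible at compile time. A self-contained sketch of the idea (the `Msg` shape and the two-entry map are illustrative, not part of the patch):

```ts
type InputsByName = {
  'llama3.2-vision:11b': readonly ['text', 'image']
  'llama3.2:3b': readonly ['text']
}

// `images` is only constructible for tags whose inputs include 'image'.
type Msg<M extends keyof InputsByName> = {
  role: 'user'
  content: string
  images?: 'image' extends InputsByName[M][number] ? string[] : never
}

const ok: Msg<'llama3.2-vision:11b'> = {
  role: 'user',
  content: 'What is shown here?',
  images: ['…base64…'],
}
// @ts-expect-error -- llama3.2:3b is text-only, so `images` must stay absent
const bad: Msg<'llama3.2:3b'> = { role: 'user', content: 'hi', images: ['…base64…'] }
```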
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts
new file mode 100644
index 00000000..328adcce
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA3_2_LATEST = {
+  name: 'llama3.2:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '2gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_2_1b = {
+  name: 'llama3.2:1b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '1.3gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_2_3b = {
+  name: 'llama3.2:3b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '2gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA3_2_MODELS = [
+  LLAMA3_2_LATEST.name,
+  LLAMA3_2_1b.name,
+  LLAMA3_2_3b.name,
+] as const
+
+// const LLAMA3_2_IMAGE_MODELS = [] as const
+
+// export const LLAMA3_2_EMBEDDING_MODELS = [] as const
+
+// const LLAMA3_2_AUDIO_MODELS = [] as const
+
+// const LLAMA3_2_VIDEO_MODELS = [] as const
+
+// export type Llama3_2ChatModels = (typeof LLAMA3_2_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_2ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA3_2_LATEST.name]: ChatRequest
+  [LLAMA3_2_1b.name]: ChatRequest
+  [LLAMA3_2_3b.name]: ChatRequest
+}
+
+export type Llama3_2ModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [LLAMA3_2_LATEST.name]: typeof LLAMA3_2_LATEST.supports.input
+  [LLAMA3_2_1b.name]: typeof LLAMA3_2_1b.supports.input
+  [LLAMA3_2_3b.name]: typeof LLAMA3_2_3b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts
new file mode 100644
index 00000000..1cbc63a8
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts
@@ -0,0 +1,52 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA3_3_LATEST = {
+  name: 'llama3.3:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '43gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_3_70b = {
+  name: 'llama3.3:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '43gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA3_3_MODELS = [
+  LLAMA3_3_LATEST.name,
+  LLAMA3_3_70b.name,
+] as const
+
+// const LLAMA3_3_IMAGE_MODELS = [] as const
+
+// export const LLAMA3_3_EMBEDDING_MODELS = [] as const
+
+// const LLAMA3_3_AUDIO_MODELS = [] as const
+
+// const LLAMA3_3_VIDEO_MODELS = [] as const
+
+// export type Llama3_3ChatModels = (typeof LLAMA3_3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_3ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA3_3_LATEST.name]: ChatRequest
+  [LLAMA3_3_70b.name]: ChatRequest
+}
+
+export type Llama3_3ModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [LLAMA3_3_LATEST.name]: typeof LLAMA3_3_LATEST.supports.input
+  [LLAMA3_3_70b.name]: typeof LLAMA3_3_70b.supports.input
+}
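Reviewer note: because every entry satisfies the same meta shape with literal names preserved, the constants compose into a literal-keyed registry. A sketch with two abbreviated entries (full objects omitted for brevity):

```ts
const LLAMA3_3_LATEST = { name: 'llama3.3:latest', context: 128_000 } as const
const LLAMA3_3_70b = { name: 'llama3.3:70b', context: 128_000 } as const

// Computed keys keep their literal types, so `keyof` yields the tag union.
const registry = {
  [LLAMA3_3_LATEST.name]: LLAMA3_3_LATEST,
  [LLAMA3_3_70b.name]: LLAMA3_3_70b,
} as const

// `model` narrows to the published tags; no runtime validation needed.
function contextFor(model: keyof typeof registry): number {
  return registry[model].context
}

console.log(contextFor('llama3.3:70b')) // 128000
```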
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts
new file mode 100644
index 00000000..d61504b9
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA3_LATEST = {
+  name: 'llama3:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.7gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_8b = {
+  name: 'llama3:8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '4.7gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA3_70b = {
+  name: 'llama3:70b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '40gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA3_MODELS = [
+  LLAMA3_LATEST.name,
+  LLAMA3_8b.name,
+  LLAMA3_70b.name,
+] as const
+
+// const LLAMA3_IMAGE_MODELS = [] as const
+
+// export const LLAMA3_EMBEDDING_MODELS = [] as const
+
+// const LLAMA3_AUDIO_MODELS = [] as const
+
+// const LLAMA3_VIDEO_MODELS = [] as const
+
+// export type Llama3ChatModels = (typeof LLAMA3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA3_LATEST.name]: ChatRequest
+  [LLAMA3_8b.name]: ChatRequest
+  [LLAMA3_70b.name]: ChatRequest
+}
+
+export type Llama3ModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [LLAMA3_LATEST.name]: typeof LLAMA3_LATEST.supports.input
+  [LLAMA3_8b.name]: typeof LLAMA3_8b.supports.input
+  [LLAMA3_70b.name]: typeof LLAMA3_70b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
new file mode 100644
index 00000000..418cc25d
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAMA4_LATEST = {
+  name: 'llama4:latest',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['tools', 'vision'],
+  },
+  size: '67gb',
+  context: 10_000_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA4_16X17b = {
+  name: 'llama4:16x17b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['tools', 'vision'],
+  },
+  size: '67gb',
+  context: 10_000_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAMA4_128X17b = {
+  name: 'llama4:128x17b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['tools', 'vision'],
+  },
+  size: '245gb',
+  context: 1_000_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAMA4_MODELS = [
+  LLAMA4_LATEST.name,
+  LLAMA4_16X17b.name,
+  LLAMA4_128X17b.name,
+] as const
+
+// const LLAMA4_IMAGE_MODELS = [] as const
+
+// export const LLAMA4_EMBEDDING_MODELS = [] as const
+
+// const LLAMA4_AUDIO_MODELS = [] as const
+
+// const LLAMA4_VIDEO_MODELS = [] as const
+
+// export type Llama3_4ChatModels = (typeof LLAMA4_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Llama3_4ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAMA4_LATEST.name]: ChatRequest
+  [LLAMA4_16X17b.name]: ChatRequest
+  [LLAMA4_128X17b.name]: ChatRequest
+}
+
+export type Llama3_4ModelInputModalitiesByName = {
+  // Text and image input for every tag
+  [LLAMA4_LATEST.name]: typeof LLAMA4_LATEST.supports.input
+  [LLAMA4_16X17b.name]: typeof LLAMA4_16X17b.supports.input
+  [LLAMA4_128X17b.name]: typeof LLAMA4_128X17b.supports.input
+}
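Reviewer note: llama4 is one of the few entries carrying both 'tools' and 'vision'. A hedged sketch of exercising the tools capability; the OpenAI-style tool schema is what ollama-js accepts, but verify the exact field types against the installed version:

```ts
import ollama from 'ollama'

const res = await ollama.chat({
  model: 'llama4:16x17b',
  messages: [{ role: 'user', content: 'What is the weather in Lisbon?' }],
  tools: [
    {
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Current weather for a city',
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
        },
      },
    },
  ],
})

// Populated only when the model elects to call the tool.
console.log(res.message.tool_calls)
```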
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts
new file mode 100644
index 00000000..da96e112
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts
@@ -0,0 +1,52 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAVA_LLAMA3_LATEST = {
+  name: 'llava-llama3:latest',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '5.5gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAVA_LLAMA3_8b = {
+  name: 'llava-llama3:8b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '5.5gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAVA_LLAMA3_MODELS = [
+  LLAVA_LLAMA3_LATEST.name,
+  LLAVA_LLAMA3_8b.name,
+] as const
+
+// const LLAVA_LLAMA3_IMAGE_MODELS = [] as const
+
+// export const LLAVA_LLAMA3_EMBEDDING_MODELS = [] as const
+
+// const LLAVA_LLAMA3_AUDIO_MODELS = [] as const
+
+// const LLAVA_LLAMA3_VIDEO_MODELS = [] as const
+
+// export type LlavaLlamaChatModels = (typeof LLAVA_LLAMA3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type LlavaLlamaChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAVA_LLAMA3_LATEST.name]: ChatRequest
+  [LLAVA_LLAMA3_8b.name]: ChatRequest
+}
+
+export type LlavaLlamaModelInputModalitiesByName = {
+  // Text and image input for every tag
+  [LLAVA_LLAMA3_LATEST.name]: typeof LLAVA_LLAMA3_LATEST.supports.input
+  [LLAVA_LLAMA3_8b.name]: typeof LLAVA_LLAMA3_8b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts
new file mode 100644
index 00000000..4c725a64
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts
@@ -0,0 +1,52 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAVA_PHI3_LATEST = {
+  name: 'llava-phi3:latest',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '2.9gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAVA_PHI3_3_8b = {
+  name: 'llava-phi3:3.8b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '2.9gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAVA_PHI3_MODELS = [
+  LLAVA_PHI3_LATEST.name,
+  LLAVA_PHI3_3_8b.name,
+] as const
+
+// const LLAVA_PHI3_IMAGE_MODELS = [] as const
+
+// export const LLAVA_PHI3_EMBEDDING_MODELS = [] as const
+
+// const LLAVA_PHI3_AUDIO_MODELS = [] as const
+
+// const LLAVA_PHI3_VIDEO_MODELS = [] as const
+
+// export type LlavaPhi3ChatModels = (typeof LLAVA_PHI3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type LlavaPhi3ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAVA_PHI3_LATEST.name]: ChatRequest
+  [LLAVA_PHI3_3_8b.name]: ChatRequest
+}
+
+export type LlavaPhi3ModelInputModalitiesByName = {
+  // Text and image input for every tag
+  [LLAVA_PHI3_LATEST.name]: typeof LLAVA_PHI3_LATEST.supports.input
+  [LLAVA_PHI3_3_8b.name]: typeof LLAVA_PHI3_3_8b.supports.input
+}
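Reviewer note: for the llava-family vision entries, ollama-js takes images as base64 strings directly on the message. A sketch (file path and prompt are illustrative):

```ts
import { readFileSync } from 'node:fs'
import ollama from 'ollama'

const image = readFileSync('./photo.jpg').toString('base64')

const res = await ollama.chat({
  model: 'llava-phi3:3.8b',
  messages: [{ role: 'user', content: 'Describe this image.', images: [image] }],
})
console.log(res.message.content)
```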
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts
new file mode 100644
index 00000000..18e7f762
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts
@@ -0,0 +1,80 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const LLAVA_LATEST = {
+  name: 'llava:latest',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '4.7gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAVA_7b = {
+  name: 'llava:7b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '4.7gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAVA_13b = {
+  name: 'llava:13b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '8gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const LLAVA_34b = {
+  name: 'llava:34b',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    capabilities: ['vision'],
+  },
+  size: '20gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const LLAVA_MODELS = [
+  LLAVA_LATEST.name,
+  LLAVA_7b.name,
+  LLAVA_13b.name,
+  LLAVA_34b.name,
+] as const
+
+// const LLAVA_IMAGE_MODELS = [] as const
+
+// export const LLAVA_EMBEDDING_MODELS = [] as const
+
+// const LLAVA_AUDIO_MODELS = [] as const
+
+// const LLAVA_VIDEO_MODELS = [] as const
+
+// export type llavaChatModels = (typeof LLAVA_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type llavaChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [LLAVA_LATEST.name]: ChatRequest
+  [LLAVA_7b.name]: ChatRequest
+  [LLAVA_13b.name]: ChatRequest
+  [LLAVA_34b.name]: ChatRequest
+}
+
+export type llavaModelInputModalitiesByName = {
+  // Text and image input for every tag
+  [LLAVA_LATEST.name]: typeof LLAVA_LATEST.supports.input
+  [LLAVA_7b.name]: typeof LLAVA_7b.supports.input
+  [LLAVA_13b.name]: typeof LLAVA_13b.supports.input
+  [LLAVA_34b.name]: typeof LLAVA_34b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts
new file mode 100644
index 00000000..fb44d209
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MARCO_O1_LATEST = { + name: 'marco-o1:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const MARCO_O1_7b = { + name: 'marco-o1:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const MARCO_O1_MODELS = [MARCO_O1_LATEST.name, MARCO_O1_7b.name] as const + +// const MARCO_O1_IMAGE_MODELS = [] as const + +// export const MARCO_O1_EMBEDDING_MODELS = [] as const + +// const MARCO_O1_AUDIO_MODELS = [] as const + +// const MARCO_O1_VIDEO_MODELS = [] as const + +// export type MarcoO1ChatModels = (typeof MARCO_O1_MODELS)[number] + +// Manual type map for per-model provider options +export type MarcoO1ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MARCO_O1_LATEST.name]: ChatRequest + [MARCO_O1_7b.name]: ChatRequest +} + +export type MarcoO1ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MARCO_O1_LATEST.name]: typeof MARCO_O1_LATEST.supports.input + [MARCO_O1_7b.name]: typeof MARCO_O1_7b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts new file mode 100644 index 00000000..7f2055f2 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MISTRAL_LARGE_LATEST = { + name: 'mistral-large:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '73gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const MISTRAL_LARGE_123b = { + name: 'mistral-large:123b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '73gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const MISTRAL_LARGE_MODELS = [ + MISTRAL_LARGE_LATEST.name, + MISTRAL_LARGE_123b.name, +] as const + +// const MISTRAL_LARGE_IMAGE_MODELS = [] as const + +// export const MISTRAL_LARGE_EMBEDDING_MODELS = [] as const + +// const MISTRAL_LARGE_AUDIO_MODELS = [] as const + +// const MISTRAL_LARGE_VIDEO_MODELS = [] as const + +// export type MistralLargeChatModels = (typeof MISTRAL_LARGE_MODELS)[number] + +// Manual type map for per-model provider options +export type MistralLargeChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MISTRAL_LARGE_LATEST.name]: ChatRequest + [MISTRAL_LARGE_123b.name]: ChatRequest +} + +export type MistralLargeModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MISTRAL_LARGE_LATEST.name]: typeof MISTRAL_LARGE_LATEST.supports.input + [MISTRAL_LARGE_123b.name]: typeof MISTRAL_LARGE_123b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts new file mode 100644 index 00000000..39fb3ab6 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts @@ -0,0 +1,52 @@ +import type { 
ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const MISTRAL_NEMO_LATEST = {
+  name: 'mistral-nemo:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '7.1gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const MISTRAL_NEMO_12b = {
+  name: 'mistral-nemo:12b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '7.1gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const MISTRAL_NEMO_MODELS = [
+  MISTRAL_NEMO_LATEST.name,
+  MISTRAL_NEMO_12b.name,
+] as const
+
+// const MISTRAL_NEMO_IMAGE_MODELS = [] as const
+
+// export const MISTRAL_NEMO_EMBEDDING_MODELS = [] as const
+
+// const MISTRAL_NEMO_AUDIO_MODELS = [] as const
+
+// const MISTRAL_NEMO_VIDEO_MODELS = [] as const
+
+// export type MistralNemoChatModels = (typeof MISTRAL_NEMO_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type MistralNemoChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [MISTRAL_NEMO_LATEST.name]: ChatRequest
+  [MISTRAL_NEMO_12b.name]: ChatRequest
+}
+
+export type MistralNemoModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [MISTRAL_NEMO_LATEST.name]: typeof MISTRAL_NEMO_LATEST.supports.input
+  [MISTRAL_NEMO_12b.name]: typeof MISTRAL_NEMO_12b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts
new file mode 100644
index 00000000..3dabd7d2
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const MISTRAL_SMALL_LATEST = {
+  name: 'mistral-small:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '14gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const MISTRAL_SMALL_22b = {
+  name: 'mistral-small:22b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '13gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const MISTRAL_SMALL_24b = {
+  name: 'mistral-small:24b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '14gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const MISTRAL_SMALL_MODELS = [
+  MISTRAL_SMALL_LATEST.name,
+  MISTRAL_SMALL_22b.name,
+  MISTRAL_SMALL_24b.name,
+] as const
+
+// const MISTRAL_SMALL_IMAGE_MODELS = [] as const
+
+// export const MISTRAL_SMALL_EMBEDDING_MODELS = [] as const
+
+// const MISTRAL_SMALL_AUDIO_MODELS = [] as const
+
+// const MISTRAL_SMALL_VIDEO_MODELS = [] as const
+
+// export type MistralSmallChatModels = (typeof MISTRAL_SMALL_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type MistralSmallChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [MISTRAL_SMALL_LATEST.name]: ChatRequest
+  [MISTRAL_SMALL_22b.name]: ChatRequest
+  [MISTRAL_SMALL_24b.name]: ChatRequest
+}
+
+export type MistralSmallModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [MISTRAL_SMALL_LATEST.name]: typeof MISTRAL_SMALL_LATEST.supports.input
+  [MISTRAL_SMALL_22b.name]: typeof MISTRAL_SMALL_22b.supports.input
+  [MISTRAL_SMALL_24b.name]: typeof MISTRAL_SMALL_24b.supports.input
+}
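Reviewer note: the `size` strings used throughout these files ('14gb', '822mb', …) are human-readable download sizes, not byte counts. If a consumer ever needs to sort or filter on them, a small parser is enough (illustrative helper, not part of the patch):

```ts
function sizeToBytes(size: string): number {
  const match = /^([\d.]+)\s*(mb|gb)$/i.exec(size)
  if (!match) throw new Error(`unrecognised size: ${size}`)
  const value = Number(match[1])
  return value * (match[2].toLowerCase() === 'gb' ? 1024 ** 3 : 1024 ** 2)
}

console.log(sizeToBytes('14gb') > sizeToBytes('822mb')) // true
```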
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts
new file mode 100644
index 00000000..55efb14d
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts
@@ -0,0 +1,49 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const MISTRAL_LATEST = {
+  name: 'mistral:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.1gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const MISTRAL_7b = {
+  name: 'mistral:7b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.1gb',
+  context: 4_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const MISTRAL_MODELS = [MISTRAL_LATEST.name, MISTRAL_7b.name] as const
+
+// const MISTRAL_IMAGE_MODELS = [] as const
+
+// export const MISTRAL_EMBEDDING_MODELS = [] as const
+
+// const MISTRAL_AUDIO_MODELS = [] as const
+
+// const MISTRAL_VIDEO_MODELS = [] as const
+
+// export type MistralChatModels = (typeof MISTRAL_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type MistralChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [MISTRAL_LATEST.name]: ChatRequest
+  [MISTRAL_7b.name]: ChatRequest
+}
+
+export type MistralModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [MISTRAL_LATEST.name]: typeof MISTRAL_LATEST.supports.input
+  [MISTRAL_7b.name]: typeof MISTRAL_7b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts
new file mode 100644
index 00000000..37656cd2
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const MIXTRAL_LATEST = {
+  name: 'mixtral:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '26gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const MIXTRAL_8X7b = {
+  name: 'mixtral:8x7b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '26gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const MIXTRAL_8X22b = {
+  name: 'mixtral:8x22b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '80gb',
+  context: 64_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const MIXTRAL_MODELS = [
+  MIXTRAL_LATEST.name,
+  MIXTRAL_8X7b.name,
+  MIXTRAL_8X22b.name,
+] as const
+
+// const MIXTRAL_IMAGE_MODELS = [] as const
+
+// export const MIXTRAL_EMBEDDING_MODELS = [] as const
+
+// const MIXTRAL_AUDIO_MODELS = [] as const
+
+// const MIXTRAL_VIDEO_MODELS = [] as const
+
+// export type MixtralChatModels = (typeof MIXTRAL_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type MixtralChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [MIXTRAL_LATEST.name]: ChatRequest
+  [MIXTRAL_8X7b.name]: ChatRequest
+  [MIXTRAL_8X22b.name]: ChatRequest
+}
+
+export type MixtralModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [MIXTRAL_LATEST.name]: typeof 
MIXTRAL_LATEST.supports.input + [MIXTRAL_8X7b.name]: typeof MIXTRAL_8X7b.supports.input + [MIXTRAL_8X22b.name]: typeof MIXTRAL_8X22b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts new file mode 100644 index 00000000..50be72ad --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const MOONDREAM_LATEST = { + name: 'moondream:latest', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '1.7gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +const MOONDREAM_1_8b = { + name: 'moondream:1.8b', + supports: { + input: ['text', 'image'], + output: ['text'], + capabilities: ['vision'], + }, + size: '1.7gb', + context: 2_000, +} as const satisfies DefaultOllamaModelMeta + +export const MOONDREAM_MODELS = [ + MOONDREAM_LATEST.name, + MOONDREAM_1_8b.name, +] as const + +// const MOONDREAM_IMAGE_MODELS = [] as const + +// export const MOONDREAM_EMBEDDING_MODELS = [] as const + +// const MOONDREAM_AUDIO_MODELS = [] as const + +// const MOONDREAM_VIDEO_MODELS = [] as const + +// export type MoondreamChatModels = (typeof MOONDREAM_MODELS)[number] + +// Manual type map for per-model provider options +export type MoondreamChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [MOONDREAM_LATEST.name]: ChatRequest + [MOONDREAM_1_8b.name]: ChatRequest +} + +export type MoondreamModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [MOONDREAM_LATEST.name]: typeof MOONDREAM_LATEST.supports.input + [MOONDREAM_1_8b.name]: typeof MOONDREAM_1_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts new file mode 100644 index 00000000..b5a9cc2d --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const NEMOTRON_MINI_LATEST = { + name: 'nemotron-mini:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.7gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const NEMOTRON_MINI_4b = { + name: 'nemotron-mini:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '2.7gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const NEMOTRON_MINI_MODELS = [ + NEMOTRON_MINI_LATEST.name, + NEMOTRON_MINI_4b.name, +] as const + +// const NEMOTRON_MINI_IMAGE_MODELS = [] as const + +// export const NEMOTRON_MINI_EMBEDDING_MODELS = [] as const + +// const NEMOTRON_MINI_AUDIO_MODELS = [] as const + +// const NEMOTRON_MINI_VIDEO_MODELS = [] as const + +// export type NemotronMiniChatModels = (typeof NEMOTRON_MINI_MODELS)[number] + +// Manual type map for per-model provider options +export type NemotronMiniChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [NEMOTRON_MINI_LATEST.name]: ChatRequest + [NEMOTRON_MINI_4b.name]: ChatRequest +} + +export type NemotronMiniModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [NEMOTRON_MINI_LATEST.name]: typeof 
NEMOTRON_MINI_LATEST.supports.input + [NEMOTRON_MINI_4b.name]: typeof NEMOTRON_MINI_4b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts new file mode 100644 index 00000000..3f06d9ea --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts @@ -0,0 +1,52 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const NEMOTRON_LATEST = { + name: 'nemotron:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const NEMOTRON_70b = { + name: 'nemotron:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const NEMOTRON_MODELS = [ + NEMOTRON_LATEST.name, + NEMOTRON_70b.name, +] as const + +// const NEMOTRON_IMAGE_MODELS = [] as const + +// export const NEMOTRON_EMBEDDING_MODELS = [] as const + +// const NEMOTRON_AUDIO_MODELS = [] as const + +// const NEMOTRON_VIDEO_MODELS = [] as const + +// export type NemotronChatModels = (typeof NEMOTRON_MODELS)[number] + +// Manual type map for per-model provider options +export type NemotronChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [NEMOTRON_LATEST.name]: ChatRequest + [NEMOTRON_70b.name]: ChatRequest +} + +export type NemotronModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [NEMOTRON_LATEST.name]: typeof NEMOTRON_LATEST.supports.input + [NEMOTRON_70b.name]: typeof NEMOTRON_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts new file mode 100644 index 00000000..621bc7b8 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const OLMO2_LATEST = { + name: 'olmo2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const OLMO2_7b = { + name: 'olmo2:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const OLMO2_13b = { + name: 'olmo2:13b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.4gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +export const OLMO2_MODELS = [ + OLMO2_LATEST.name, + OLMO2_7b.name, + OLMO2_13b.name, +] as const + +// const OLMO2_IMAGE_MODELS = [] as const + +// export const OLMO2_EMBEDDING_MODELS = [] as const + +// const OLMO2_AUDIO_MODELS = [] as const + +// const OLMO2_VIDEO_MODELS = [] as const + +// export type Olmo2ChatModels = (typeof OLMO2_MODELS)[number] + +// Manual type map for per-model provider options +export type Olmo2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [OLMO2_LATEST.name]: ChatRequest + [OLMO2_7b.name]: ChatRequest + [OLMO2_13b.name]: ChatRequest +} + +export type Olmo2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [OLMO2_LATEST.name]: typeof OLMO2_LATEST.supports.input + 
[OLMO2_7b.name]: typeof OLMO2_7b.supports.input + [OLMO2_13b.name]: typeof OLMO2_13b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts new file mode 100644 index 00000000..0b22d3b2 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const OPENCODER_LATEST = { + name: 'opencoder:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +const OPENCODER_1_5b = { + name: 'opencoder:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.4gb', + context: 4_000, +} as const satisfies DefaultOllamaModelMeta + +const OPENCODER_8b = { + name: 'opencoder:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.7gb', + context: 8_000, +} as const satisfies DefaultOllamaModelMeta + +export const OPENCODER_MODELS = [ + OPENCODER_LATEST.name, + OPENCODER_1_5b.name, + OPENCODER_8b.name, +] as const + +// const OPENCODER_IMAGE_MODELS = [] as const + +// export const OPENCODER_EMBEDDING_MODELS = [] as const + +// const OPENCODER_AUDIO_MODELS = [] as const + +// const OPENCODER_VIDEO_MODELS = [] as const + +// export type OpencoderChatModels = (typeof OPENCODER_MODELS)[number] + +// Manual type map for per-model provider options +export type OpencoderChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [OPENCODER_LATEST.name]: ChatRequest + [OPENCODER_1_5b.name]: ChatRequest + [OPENCODER_8b.name]: ChatRequest +} + +export type OpencoderModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [OPENCODER_LATEST.name]: typeof OPENCODER_LATEST.supports.input + [OPENCODER_1_5b.name]: typeof OPENCODER_1_5b.supports.input + [OPENCODER_8b.name]: typeof OPENCODER_8b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts new file mode 100644 index 00000000..459baeca --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const OPENHERMES_LATEST = { + name: 'openhermes:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const OPENHERMES_V2 = { + name: 'openhermes:v2', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const OPENHERMES_V2_5 = { + name: 'openhermes:v2.5', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const OPENHERMES_MODELS = [ + OPENHERMES_LATEST.name, + OPENHERMES_V2.name, + OPENHERMES_V2_5.name, +] as const + +// const OPENHERMES_IMAGE_MODELS = [] as const + +// export const OPENHERMES_EMBEDDING_MODELS = [] as const + +// const OPENHERMES_AUDIO_MODELS = [] as const + +// const OPENHERMES_VIDEO_MODELS = [] as const + +// export type OpenhermesChatModels = (typeof 
OPENHERMES_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type OpenhermesChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [OPENHERMES_LATEST.name]: ChatRequest
+  [OPENHERMES_V2.name]: ChatRequest
+  [OPENHERMES_V2_5.name]: ChatRequest
+}
+
+export type OpenhermesModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [OPENHERMES_LATEST.name]: typeof OPENHERMES_LATEST.supports.input
+  [OPENHERMES_V2.name]: typeof OPENHERMES_V2.supports.input
+  [OPENHERMES_V2_5.name]: typeof OPENHERMES_V2_5.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts
new file mode 100644
index 00000000..7836b653
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts
@@ -0,0 +1,66 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const PHI3_LATEST = {
+  name: 'phi3:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '2.2gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const PHI3_3_8b = {
+  name: 'phi3:3.8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '2.2gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const PHI3_14b = {
+  name: 'phi3:14b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '7.9gb',
+  context: 128_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const PHI3_MODELS = [
+  PHI3_LATEST.name,
+  PHI3_3_8b.name,
+  PHI3_14b.name,
+] as const
+
+// const PHI3_IMAGE_MODELS = [] as const
+
+// export const PHI3_EMBEDDING_MODELS = [] as const
+
+// const PHI3_AUDIO_MODELS = [] as const
+
+// const PHI3_VIDEO_MODELS = [] as const
+
+// export type Phi3ChatModels = (typeof PHI3_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Phi3ChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [PHI3_LATEST.name]: ChatRequest
+  [PHI3_3_8b.name]: ChatRequest
+  [PHI3_14b.name]: ChatRequest
+}
+
+export type Phi3ModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [PHI3_LATEST.name]: typeof PHI3_LATEST.supports.input
+  [PHI3_3_8b.name]: typeof PHI3_3_8b.supports.input
+  [PHI3_14b.name]: typeof PHI3_14b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts
new file mode 100644
index 00000000..38ffeb9b
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts
@@ -0,0 +1,49 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const PHI4_LATEST = {
+  name: 'phi4:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '9.1gb',
+  context: 16_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const PHI4_14b = {
+  name: 'phi4:14b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '9.1gb',
+  context: 16_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const PHI4_MODELS = [PHI4_LATEST.name, PHI4_14b.name] as const
+
+// const PHI4_IMAGE_MODELS = [] as const
+
+// export const PHI4_EMBEDDING_MODELS = [] as const
+
+// const PHI4_AUDIO_MODELS = [] as const
+
+// const PHI4_VIDEO_MODELS = [] as const
+
+// export type 
Phi4ChatModels = (typeof PHI4_MODELS)[number] + +// Manual type map for per-model provider options +export type Phi4ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [PHI4_LATEST.name]: ChatRequest + [PHI4_14b.name]: ChatRequest +} + +export type Phi4ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [PHI4_LATEST.name]: typeof PHI4_LATEST.supports.input + [PHI4_14b.name]: typeof PHI4_14b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts new file mode 100644 index 00000000..fee586de --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts @@ -0,0 +1,150 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWEN_LATEST = { + name: 'qwen:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.3gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_0_5b = { + name: 'qwen:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '395mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_1_8b = { + name: 'qwen:1.8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '1.1gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_4b = { + name: 'qwen:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '2.3gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_7b = { + name: 'qwen:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.5gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_14b = { + name: 'qwen:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '8.2gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_32b = { + name: 'qwen:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '18gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_72b = { + name: 'qwen:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '41gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN_110b = { + name: 'qwen:110b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '63gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWEN_MODELS = [ + QWEN_LATEST.name, + QWEN_0_5b.name, + QWEN_1_8b.name, + QWEN_4b.name, + QWEN_7b.name, + QWEN_14b.name, + QWEN_32b.name, + QWEN_72b.name, + QWEN_110b.name, +] as const + +// const QWEN_IMAGE_MODELS = [] as const + +// export const QWEN_EMBEDDING_MODELS = [] as const + +// const QWEN_AUDIO_MODELS = [] as const + +// const QWEN_VIDEO_MODELS = [] as const + +// export type QwenChatModels = (typeof QWEN_MODELS)[number] + +// Manual type map for per-model provider options +export type QwenChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN_LATEST.name]: ChatRequest + [QWEN_0_5b.name]: ChatRequest + [QWEN_1_8b.name]: ChatRequest + [QWEN_4b.name]: ChatRequest + [QWEN_7b.name]: ChatRequest + [QWEN_14b.name]: ChatRequest + [QWEN_32b.name]: ChatRequest + [QWEN_72b.name]: ChatRequest + 
[QWEN_110b.name]: ChatRequest
+}
+
+export type QwenModelInputModalitiesByName = {
+  // Text-only input for every tag
+  [QWEN_LATEST.name]: typeof QWEN_LATEST.supports.input
+  [QWEN_0_5b.name]: typeof QWEN_0_5b.supports.input
+  [QWEN_1_8b.name]: typeof QWEN_1_8b.supports.input
+  [QWEN_4b.name]: typeof QWEN_4b.supports.input
+  [QWEN_7b.name]: typeof QWEN_7b.supports.input
+  [QWEN_14b.name]: typeof QWEN_14b.supports.input
+  [QWEN_32b.name]: typeof QWEN_32b.supports.input
+  [QWEN_72b.name]: typeof QWEN_72b.supports.input
+  [QWEN_110b.name]: typeof QWEN_110b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts
new file mode 100644
index 00000000..a033db9c
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts
@@ -0,0 +1,122 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const QWEN2_5_CODER_LATEST = {
+  name: 'qwen2.5-coder:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.7gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_CODER_0_5b = {
+  name: 'qwen2.5-coder:0.5b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '398mb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_CODER_1_5b = {
+  name: 'qwen2.5-coder:1.5b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '986mb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_CODER_3b = {
+  name: 'qwen2.5-coder:3b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '1.9gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_CODER_7b = {
+  name: 'qwen2.5-coder:7b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.7gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_CODER_14b = {
+  name: 'qwen2.5-coder:14b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '9gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_CODER_32b = {
+  name: 'qwen2.5-coder:32b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '20gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const QWEN2_5_CODER_MODELS = [
+  QWEN2_5_CODER_LATEST.name,
+  QWEN2_5_CODER_0_5b.name,
+  QWEN2_5_CODER_1_5b.name,
+  QWEN2_5_CODER_3b.name,
+  QWEN2_5_CODER_7b.name,
+  QWEN2_5_CODER_14b.name,
+  QWEN2_5_CODER_32b.name,
+] as const
+
+// const QWEN2_5_CODER_IMAGE_MODELS = [] as const
+
+// export const QWEN2_5_CODER_EMBEDDING_MODELS = [] as const
+
+// const QWEN2_5_CODER_AUDIO_MODELS = [] as const
+
+// const QWEN2_5_CODER_VIDEO_MODELS = [] as const
+
+// export type Qwen2_5CoderChatModels = (typeof QWEN2_5_CODER_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Qwen2_5CoderChatModelProviderOptionsByName = {
+  // Every tag takes the base ChatRequest options
+  [QWEN2_5_CODER_LATEST.name]: ChatRequest
+  [QWEN2_5_CODER_0_5b.name]: ChatRequest
+  [QWEN2_5_CODER_1_5b.name]: ChatRequest
+  [QWEN2_5_CODER_3b.name]: ChatRequest
+  [QWEN2_5_CODER_7b.name]: ChatRequest
+  [QWEN2_5_CODER_14b.name]: ChatRequest
+  [QWEN2_5_CODER_32b.name]: 
+}
+
+export type Qwen2_5CoderModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [QWEN2_5_CODER_LATEST.name]: typeof QWEN2_5_CODER_LATEST.supports.input
+  [QWEN2_5_CODER_0_5b.name]: typeof QWEN2_5_CODER_0_5b.supports.input
+  [QWEN2_5_CODER_1_5b.name]: typeof QWEN2_5_CODER_1_5b.supports.input
+  [QWEN2_5_CODER_3b.name]: typeof QWEN2_5_CODER_3b.supports.input
+  [QWEN2_5_CODER_7b.name]: typeof QWEN2_5_CODER_7b.supports.input
+  [QWEN2_5_CODER_14b.name]: typeof QWEN2_5_CODER_14b.supports.input
+  [QWEN2_5_CODER_32b.name]: typeof QWEN2_5_CODER_32b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts
new file mode 100644
index 00000000..4827e758
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts
@@ -0,0 +1,122 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const QWEN2_5_LATEST = {
+  name: 'qwen2.5:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.7gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_0_5b = {
+  name: 'qwen2.5:0.5b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '398mb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_1_5b = {
+  name: 'qwen2.5:1.5b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '986mb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_3b = {
+  name: 'qwen2.5:3b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '1.9gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_7b = {
+  name: 'qwen2.5:7b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '4.7gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_32b = {
+  name: 'qwen2.5:32b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '20gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWEN2_5_72b = {
+  name: 'qwen2.5:72b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '47gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const QWEN2_5_MODELS = [
+  QWEN2_5_LATEST.name,
+  QWEN2_5_0_5b.name,
+  QWEN2_5_1_5b.name,
+  QWEN2_5_3b.name,
+  QWEN2_5_7b.name,
+  QWEN2_5_32b.name,
+  QWEN2_5_72b.name,
+] as const
+
+// const QWEN2_5_IMAGE_MODELS = [] as const
+
+// export const QWEN2_5_EMBEDDING_MODELS = [] as const
+
+// const QWEN2_5_AUDIO_MODELS = [] as const
+
+// const QWEN2_5_VIDEO_MODELS = [] as const
+
+// export type Qwen2_5ChatModels = (typeof QWEN2_5_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Qwen2_5ChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [QWEN2_5_LATEST.name]: ChatRequest
+  [QWEN2_5_0_5b.name]: ChatRequest
+  [QWEN2_5_1_5b.name]: ChatRequest
+  [QWEN2_5_3b.name]: ChatRequest
+  [QWEN2_5_7b.name]: ChatRequest
+  [QWEN2_5_32b.name]: ChatRequest
+  [QWEN2_5_72b.name]: ChatRequest
+}
+
+export type Qwen2_5ModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [QWEN2_5_LATEST.name]: typeof QWEN2_5_LATEST.supports.input
+
[QWEN2_5_0_5b.name]: typeof QWEN2_5_0_5b.supports.input + [QWEN2_5_1_5b.name]: typeof QWEN2_5_1_5b.supports.input + [QWEN2_5_3b.name]: typeof QWEN2_5_3b.supports.input + [QWEN2_5_7b.name]: typeof QWEN2_5_7b.supports.input + [QWEN2_5_32b.name]: typeof QWEN2_5_32b.supports.input + [QWEN2_5_72b.name]: typeof QWEN2_5_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts new file mode 100644 index 00000000..87e42e14 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts @@ -0,0 +1,94 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWEN2_LATEST = { + name: 'qwen2:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.4gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_0_5b = { + name: 'qwen2:0.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '352mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_1_5b = { + name: 'qwen2:1.5b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '935mb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_7b = { + name: 'qwen2:7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '4.4gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN2_72b = { + name: 'qwen2:72b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['tools'], + }, + size: '41gb', + context: 32_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWEN2_MODELS = [ + QWEN2_LATEST.name, + QWEN2_0_5b.name, + QWEN2_1_5b.name, + QWEN2_7b.name, + QWEN2_72b.name, +] as const + +// const QWEN2_IMAGE_MODELS = [] as const + +// export const QWEN2_EMBEDDING_MODELS = [] as const + +// const QWEN2_AUDIO_MODELS = [] as const + +// const QWEN2_VIDEO_MODELS = [] as const + +// export type Qwen2ChatModels = (typeof QWEN2_MODELS)[number] + +// Manual type map for per-model provider options +export type Qwen2ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN2_LATEST.name]: ChatRequest + [QWEN2_0_5b.name]: ChatRequest + [QWEN2_1_5b.name]: ChatRequest + [QWEN2_7b.name]: ChatRequest + [QWEN2_72b.name]: ChatRequest +} + +export type Qwen2ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN2_LATEST.name]: typeof QWEN2_LATEST.supports.input + [QWEN2_0_5b.name]: typeof QWEN2_0_5b.supports.input + [QWEN2_1_5b.name]: typeof QWEN2_1_5b.supports.input + [QWEN2_7b.name]: typeof QWEN2_7b.supports.input + [QWEN2_72b.name]: typeof QWEN2_72b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts new file mode 100644 index 00000000..b3bcbe99 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts @@ -0,0 +1,150 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const QWEN3_LATEST = { + name: 'qwen3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_0_6b = { + name: 'qwen3:0.6b', + supports: { + input: 
['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '523mb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_1_7b = { + name: 'qwen3:1.7b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '1.4gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_4b = { + name: 'qwen3:4b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '2.5gb', + context: 256_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_8b = { + name: 'qwen3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '5.2gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_14b = { + name: 'qwen3:14b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '9.3gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_30b = { + name: 'qwen3:30b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '19gb', + context: 256_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_32b = { + name: 'qwen3:32b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '20gb', + context: 40_000, +} as const satisfies DefaultOllamaModelMeta + +const QWEN3_235b = { + name: 'qwen3:235b', + supports: { + input: ['text'], + output: ['text'], + capabilities: ['thinking', 'tools'], + }, + size: '142gb', + context: 256_000, +} as const satisfies DefaultOllamaModelMeta + +export const QWEN3_MODELS = [ + QWEN3_LATEST.name, + QWEN3_0_6b.name, + QWEN3_1_7b.name, + QWEN3_4b.name, + QWEN3_8b.name, + QWEN3_14b.name, + QWEN3_30b.name, + QWEN3_32b.name, + QWEN3_235b.name, +] as const + +// const QWEN3_IMAGE_MODELS = [] as const + +// export const QWEN3_EMBEDDING_MODELS = [] as const + +// const QWEN3_AUDIO_MODELS = [] as const + +// const QWEN3_VIDEO_MODELS = [] as const + +// export type Qwen3ChatModels = (typeof QWEN3_MODELS)[number] + +// Manual type map for per-model provider options +export type Qwen3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [QWEN3_LATEST.name]: ChatRequest + [QWEN3_0_6b.name]: ChatRequest + [QWEN3_1_7b.name]: ChatRequest + [QWEN3_4b.name]: ChatRequest + [QWEN3_8b.name]: ChatRequest + [QWEN3_14b.name]: ChatRequest + [QWEN3_30b.name]: ChatRequest + [QWEN3_32b.name]: ChatRequest + [QWEN3_235b.name]: ChatRequest +} + +export type Qwen3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [QWEN3_LATEST.name]: typeof QWEN3_LATEST.supports.input + [QWEN3_0_6b.name]: typeof QWEN3_0_6b.supports.input + [QWEN3_1_7b.name]: typeof QWEN3_1_7b.supports.input + [QWEN3_4b.name]: typeof QWEN3_4b.supports.input + [QWEN3_8b.name]: typeof QWEN3_8b.supports.input + [QWEN3_14b.name]: typeof QWEN3_14b.supports.input + [QWEN3_30b.name]: typeof QWEN3_30b.supports.input + [QWEN3_32b.name]: typeof QWEN3_32b.supports.input + [QWEN3_235b.name]: typeof QWEN3_235b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts new file mode 100644 index 00000000..41738f15 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts @@ -0,0 +1,49 @@ +import type { ChatRequest } from 'ollama' +import type { 
DefaultOllamaModelMeta } from './models-meta'
+
+const QWQ_LATEST = {
+  name: 'qwq:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '20gb',
+  context: 40_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const QWQ_32b = {
+  name: 'qwq:32b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: ['tools'],
+  },
+  size: '20gb',
+  context: 40_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const QWQ_MODELS = [QWQ_LATEST.name, QWQ_32b.name] as const
+
+// const QWQ_IMAGE_MODELS = [] as const
+
+// export const QWQ_EMBEDDING_MODELS = [] as const
+
+// const QWQ_AUDIO_MODELS = [] as const
+
+// const QWQ_VIDEO_MODELS = [] as const
+
+// export type QwqChatModels = (typeof QWQ_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type QwqChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [QWQ_LATEST.name]: ChatRequest
+  [QWQ_32b.name]: ChatRequest
+}
+
+export type QwqModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [QWQ_LATEST.name]: typeof QWQ_LATEST.supports.input
+  [QWQ_32b.name]: typeof QWQ_32b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts
new file mode 100644
index 00000000..9a6ae9f6
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts
@@ -0,0 +1,80 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const SAILOR2_LATEST = {
+  name: 'sailor2:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '5.2gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SAILOR2_1b = {
+  name: 'sailor2:1b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '1.1gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SAILOR2_8b = {
+  name: 'sailor2:8b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '5.2gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SAILOR2_20b = {
+  name: 'sailor2:20b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '12gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const SAILOR2_MODELS = [
+  SAILOR2_LATEST.name,
+  SAILOR2_1b.name,
+  SAILOR2_8b.name,
+  SAILOR2_20b.name,
+] as const
+
+// const SAILOR2_IMAGE_MODELS = [] as const
+
+// export const SAILOR2_EMBEDDING_MODELS = [] as const
+
+// const SAILOR2_AUDIO_MODELS = [] as const
+
+// const SAILOR2_VIDEO_MODELS = [] as const
+
+// export type Sailor2ChatModels = (typeof SAILOR2_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type Sailor2ChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [SAILOR2_LATEST.name]: ChatRequest
+  [SAILOR2_1b.name]: ChatRequest
+  [SAILOR2_8b.name]: ChatRequest
+  [SAILOR2_20b.name]: ChatRequest
+}
+
+export type Sailor2ModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [SAILOR2_LATEST.name]: typeof SAILOR2_LATEST.supports.input
+  [SAILOR2_1b.name]: typeof SAILOR2_1b.supports.input
+  [SAILOR2_8b.name]: typeof SAILOR2_8b.supports.input
+  [SAILOR2_20b.name]: typeof SAILOR2_20b.supports.input
+}
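Every meta constant in these files leans on the same `as const satisfies` combination. A self-contained sketch of why both modifiers matter (the interface here is a simplified stand-in for DefaultOllamaModelMeta):

```typescript
interface ModelMetaShape {
  name: string
  context?: number
}

// `satisfies` checks the object against ModelMetaShape at compile time,
// while `as const` preserves the literal type of `name` instead of
// widening it to string. That is what lets arrays like SAILOR2_MODELS
// become tuples of literal tags usable as object keys.
const SAILOR2_8b_EXAMPLE = {
  name: 'sailor2:8b',
  context: 32_000,
} as const satisfies ModelMetaShape

type Tag = typeof SAILOR2_8b_EXAMPLE.name // 'sailor2:8b', not string
```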
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts
new file mode 100644
index 00000000..62fa1e6f
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts
@@ -0,0 +1,80 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const SHIELDGEMMA_LATEST = {
+  name: 'shieldgemma:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '5.8gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SHIELDGEMMA_2b = {
+  name: 'shieldgemma:2b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '1.7gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SHIELDGEMMA_9b = {
+  name: 'shieldgemma:9b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '5.8gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SHIELDGEMMA_27b = {
+  name: 'shieldgemma:27b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '17gb',
+  context: 8_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const SHIELDGEMMA_MODELS = [
+  SHIELDGEMMA_LATEST.name,
+  SHIELDGEMMA_2b.name,
+  SHIELDGEMMA_9b.name,
+  SHIELDGEMMA_27b.name,
+] as const
+
+// const SHIELDGEMMA_IMAGE_MODELS = [] as const
+
+// export const SHIELDGEMMA_EMBEDDING_MODELS = [] as const
+
+// const SHIELDGEMMA_AUDIO_MODELS = [] as const
+
+// const SHIELDGEMMA_VIDEO_MODELS = [] as const
+
+// export type ShieldgemmaChatModels = (typeof SHIELDGEMMA_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type ShieldgemmaChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [SHIELDGEMMA_LATEST.name]: ChatRequest
+  [SHIELDGEMMA_2b.name]: ChatRequest
+  [SHIELDGEMMA_9b.name]: ChatRequest
+  [SHIELDGEMMA_27b.name]: ChatRequest
+}
+
+export type ShieldgemmaModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [SHIELDGEMMA_LATEST.name]: typeof SHIELDGEMMA_LATEST.supports.input
+  [SHIELDGEMMA_2b.name]: typeof SHIELDGEMMA_2b.supports.input
+  [SHIELDGEMMA_9b.name]: typeof SHIELDGEMMA_9b.supports.input
+  [SHIELDGEMMA_27b.name]: typeof SHIELDGEMMA_27b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts
new file mode 100644
index 00000000..eafdeb8e
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts
@@ -0,0 +1,52 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const SMALLTINKER_LATEST = {
+  name: 'smallthinker:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '3.6gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SMALLTINKER_3b = {
+  name: 'smallthinker:3b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '3.6gb',
+  context: 32_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const SMALLTINKER_MODELS = [
+  SMALLTINKER_LATEST.name,
+  SMALLTINKER_3b.name,
+] as const
+
+// const SMALLTINKER_IMAGE_MODELS = [] as const
+
+// export const SMALLTINKER_EMBEDDING_MODELS = [] as const
+
+// const SMALLTINKER_AUDIO_MODELS = [] as const
+
+// const SMALLTINKER_VIDEO_MODELS = [] as const
+
+// export type SmalltinkerChatModels = (typeof SMALLTINKER_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type SmalltinkerChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [SMALLTINKER_LATEST.name]: ChatRequest
+  [SMALLTINKER_3b.name]: ChatRequest
+}
+
+export type SmalltinkerModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [SMALLTINKER_LATEST.name]: typeof SMALLTINKER_LATEST.supports.input
+  [SMALLTINKER_3b.name]: typeof SMALLTINKER_3b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts
new file mode 100644
index 00000000..79ebc939
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts
@@ -0,0 +1,80 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const SMOLLM_LATEST = {
+  name: 'smollm:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '991mb',
+  context: 2_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SMOLLM_135m = {
+  name: 'smollm:135m',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '92mb',
+  context: 2_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SMOLLM_360m = {
+  name: 'smollm:360m',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '229mb',
+  context: 2_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const SMOLLM_1_7b = {
+  name: 'smollm:1.7b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '991mb',
+  context: 2_000,
+} as const satisfies DefaultOllamaModelMeta
+
+export const SMOLLM_MODELS = [
+  SMOLLM_LATEST.name,
+  SMOLLM_135m.name,
+  SMOLLM_360m.name,
+  SMOLLM_1_7b.name,
+] as const
+
+// const SMOLLM_IMAGE_MODELS = [] as const
+
+// export const SMOLLM_EMBEDDING_MODELS = [] as const
+
+// const SMOLLM_AUDIO_MODELS = [] as const
+
+// const SMOLLM_VIDEO_MODELS = [] as const
+
+// export type SmollmChatModels = (typeof SMOLLM_MODELS)[number]
+
+// Manual type map for per-model provider options
+export type SmollmChatModelProviderOptionsByName = {
+  // Models with thinking and structured output support
+  [SMOLLM_LATEST.name]: ChatRequest
+  [SMOLLM_135m.name]: ChatRequest
+  [SMOLLM_360m.name]: ChatRequest
+  [SMOLLM_1_7b.name]: ChatRequest
+}
+
+export type SmollmModelInputModalitiesByName = {
+  // Models with text, image, audio, video (no document)
+  [SMOLLM_LATEST.name]: typeof SMOLLM_LATEST.supports.input
+  [SMOLLM_135m.name]: typeof SMOLLM_135m.supports.input
+  [SMOLLM_360m.name]: typeof SMOLLM_360m.supports.input
+  [SMOLLM_1_7b.name]: typeof SMOLLM_1_7b.supports.input
+}
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts
new file mode 100644
index 00000000..a4b0e110
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts
@@ -0,0 +1,52 @@
+import type { ChatRequest } from 'ollama'
+import type { DefaultOllamaModelMeta } from './models-meta'
+
+const TINNYLLAMA_LATEST = {
+  name: 'tinyllama:latest',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '638mb',
+  context: 2_000,
+} as const satisfies DefaultOllamaModelMeta
+
+const TINNYLLAMA_1_1b = {
+  name: 'tinyllama:1.1b',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    capabilities: [],
+  },
+  size: '638mb',
+  context: 2_000,
+} as const satisfies DefaultOllamaModelMeta
+
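The `typeof X.supports.input` lookups used throughout these maps resolve to readonly tuples of literal modality names. A small sketch of what a consumer sees (illustrative only; it uses the smollm map defined just above, and the guard alias is invented):

```typescript
import type { SmollmModelInputModalitiesByName } from './meta/model-meta-smollm'

// Resolves to readonly ['text'] for every smollm size, because each
// constant declares supports.input as ['text'] under `as const`.
type SmollmInputs = SmollmModelInputModalitiesByName['smollm:360m']

// A hypothetical guard built on top: only modalities present in the
// tuple are accepted for that model.
type AllowsImage = 'image' extends SmollmInputs[number] ? true : false // false
```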
+export const TINNYLLAMA_MODELS = [ + TINNYLLAMA_LATEST.name, + TINNYLLAMA_1_1b.name, +] as const + +// const TINNYLLAMA_IMAGE_MODELS = [] as const + +// export const TINNYLLAMA_EMBEDDING_MODELS = [] as const + +// const TINNYLLAMA_AUDIO_MODELS = [] as const + +// const TINNYLLAMA_VIDEO_MODELS = [] as const + +// export type TinnyllamaChatModels = (typeof TINNYLLAMA_MODELS)[number] + +// Manual type map for per-model provider options +export type TinnyllamaChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [TINNYLLAMA_LATEST.name]: ChatRequest + [TINNYLLAMA_1_1b.name]: ChatRequest +} + +export type TinnyllamaModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [TINNYLLAMA_LATEST.name]: typeof TINNYLLAMA_LATEST.supports.input + [TINNYLLAMA_1_1b.name]: typeof TINNYLLAMA_1_1b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts new file mode 100644 index 00000000..c76e6519 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts @@ -0,0 +1,66 @@ +import type { ChatRequest } from 'ollama' +import type { DefaultOllamaModelMeta } from './models-meta' + +const TULU3_LATEST = { + name: 'tulu3:latest', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const TULU3_8b = { + name: 'tulu3:8b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '4.9gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +const TULU3_70b = { + name: 'tulu3:70b', + supports: { + input: ['text'], + output: ['text'], + capabilities: [], + }, + size: '43gb', + context: 128_000, +} as const satisfies DefaultOllamaModelMeta + +export const TULU3_MODELS = [ + TULU3_LATEST.name, + TULU3_8b.name, + TULU3_70b.name, +] as const + +// const TULU3_IMAGE_MODELS = [] as const + +// export const TULU3_EMBEDDING_MODELS = [] as const + +// const TULU3_AUDIO_MODELS = [] as const + +// const TULU3_VIDEO_MODELS = [] as const + +// export type Tulu3ChatModels = (typeof TULU3_MODELS)[number] + +// Manual type map for per-model provider options +export type Tulu3ChatModelProviderOptionsByName = { + // Models with thinking and structured output support + [TULU3_LATEST.name]: ChatRequest + [TULU3_8b.name]: ChatRequest + [TULU3_70b.name]: ChatRequest +} + +export type Tulu3ModelInputModalitiesByName = { + // Models with text, image, audio, video (no document) + [TULU3_LATEST.name]: typeof TULU3_LATEST.supports.input + [TULU3_8b.name]: typeof TULU3_8b.supports.input + [TULU3_70b.name]: typeof TULU3_70b.supports.input +} diff --git a/packages/typescript/ai-ollama/src/meta/models-meta.ts b/packages/typescript/ai-ollama/src/meta/models-meta.ts new file mode 100644 index 00000000..099432b4 --- /dev/null +++ b/packages/typescript/ai-ollama/src/meta/models-meta.ts @@ -0,0 +1,11 @@ +export interface DefaultOllamaModelMeta { + name: string + providerOptions?: TProviderOptions + supports?: { + input?: Array<'text' | 'image' | 'video'> + output?: Array<'text' | 'image' | 'video'> + capabilities?: Array<'tools' | 'thinking' | 'vision' | 'embedding'> + } + size?: string + context?: number +} diff --git a/packages/typescript/ai-ollama/src/model-meta.ts b/packages/typescript/ai-ollama/src/model-meta.ts new file mode 100644 index 00000000..1b9a3e8c --- /dev/null +++ 
b/packages/typescript/ai-ollama/src/model-meta.ts
@@ -0,0 +1,264 @@
+// constants
+import { ATHENE_MODELS } from './meta/model-meta-athene'
+import { AYA_MODELS } from './meta/model-meta-aya'
+import { CODEGEMMA_MODELS } from './meta/model-meta-codegemma'
+import { CODELLAMA_MODELS } from './meta/model-meta-codellama'
+import { COMMAND_R_MODELS } from './meta/model-meta-command-r'
+import { COMMAND_R_PLUS_MODELS } from './meta/model-meta-command-r-plus'
+import { COMMAND_R_7b_MODELS } from './meta/model-meta-command-r7b'
+import { DEEPSEEK_CODER_V2_MODELS } from './meta/model-meta-deepseek-coder-v2'
+import { DEEPSEEK_OCR_MODELS } from './meta/model-meta-deepseek-ocr'
+import { DEEPSEEK_R1_MODELS } from './meta/model-meta-deepseek-r1'
+import { DEEPSEEK_V3_1_MODELS } from './meta/model-meta-deepseek-v3.1'
+import { DEVSTRAL_MODELS } from './meta/model-meta-devstral'
+import { DOLPHIN3_MODELS } from './meta/model-meta-dolphin3'
+import { EXAONE3_5MODELS } from './meta/model-meta-exaone3.5'
+import { FALCON2_MODELS } from './meta/model-meta-falcon2'
+import { FALCON3_MODELS } from './meta/model-meta-falcon3'
+import { FIREFUNCTION_V2_MODELS } from './meta/model-meta-firefunction-v2'
+import { GEMMA_MODELS } from './meta/model-meta-gemma'
+import { GEMMA2_MODELS } from './meta/model-meta-gemma2'
+import { GEMMA3_MODELS } from './meta/model-meta-gemma3'
+import { GRANITE3_DENSE_MODELS } from './meta/model-meta-granite3-dense'
+import { GRANITE3_GUARDIAN_MODELS } from './meta/model-meta-granite3-guardian'
+import { GRANITE3_MOE_MODELS } from './meta/model-meta-granite3-moe'
+import { GRANITE3_1_DENSE_MODELS } from './meta/model-meta-granite3.1-dense'
+import { GRANITE3_1_MOE_MODELS } from './meta/model-meta-granite3.1-moe'
+import { LLAMA_GUARD3_MODELS } from './meta/model-meta-llama-guard3'
+import { LLAMA2_MODELS } from './meta/model-meta-llama2'
+import { LLAMA3_MODELS } from './meta/model-meta-llama3'
+import { LLAMA3_CHATQA_MODELS } from './meta/model-meta-llama3-chatqa'
+import { LLAMA3_GRADIENT_MODELS } from './meta/model-meta-llama3-gradient'
+import { LLAMA3_1_MODELS } from './meta/model-meta-llama3.1'
+import { LLAMA3_2_MODELS } from './meta/model-meta-llama3.2'
+import { LLAMA3_2_VISION_MODELS } from './meta/model-meta-llama3.2-vision'
+import { LLAMA3_3_MODELS } from './meta/model-meta-llama3.3'
+import { LLAMA4_MODELS } from './meta/model-meta-llama4'
+import { LLAVA_MODELS } from './meta/model-meta-llava'
+import { LLAVA_LLAMA3_MODELS } from './meta/model-meta-llava-llama3'
+import { LLAVA_PHI3_MODELS } from './meta/model-meta-llava-phi3'
+import { MARCO_O1_MODELS } from './meta/model-meta-marco-o1'
+import { MISTRAL_MODELS } from './meta/model-meta-mistral'
+import { MISTRAL_LARGE_MODELS } from './meta/model-meta-mistral-large'
+import { MISTRAL_NEMO_MODELS } from './meta/model-meta-mistral-nemo'
+import { MISTRAL_SMALL_MODELS } from './meta/model-meta-mistral-small'
+import { MIXTRAL_MODELS } from './meta/model-meta-mixtral'
+import { MOONDREAM_MODELS } from './meta/model-meta-moondream'
+import { NEMOTRON_MODELS } from './meta/model-meta-nemotron'
+import { NEMOTRON_MINI_MODELS } from './meta/model-meta-nemotron-mini'
+import { OLMO2_MODELS } from './meta/model-meta-olmo2'
+import { OPENCODER_MODELS } from './meta/model-meta-opencoder'
+import { OPENHERMES_MODELS } from './meta/model-meta-openhermes'
+import { PHI3_MODELS } from './meta/model-meta-phi3'
+import { PHI4_MODELS } from './meta/model-meta-phi4'
+import { QWEN_MODELS } from './meta/model-meta-qwen'
+import {
QWEN2_MODELS } from './meta/model-meta-qwen2' +import { QWEN2_5_MODELS } from './meta/model-meta-qwen2.5' +import { QWEN2_5_CODER_MODELS } from './meta/model-meta-qwen2.5-coder' +import { QWEN3_MODELS } from './meta/model-meta-qwen3' +import { QWQ_MODELS } from './meta/model-meta-qwq' +import { SAILOR2_MODELS } from './meta/model-meta-sailor2' +import { SHIELDGEMMA_MODELS } from './meta/model-meta-shieldgemma' +import { SMALLTINKER_MODELS } from './meta/model-meta-smalltinker' +import { SMOLLM_MODELS } from './meta/model-meta-smollm' +import { TINNYLLAMA_MODELS } from './meta/model-meta-tinyllama' +import { TULU3_MODELS } from './meta/model-meta-tulu3' + +// types +import type { AtheneModelInputModalitiesByName } from './meta/model-meta-athene' +import type { AyaModelInputModalitiesByName } from './meta/model-meta-aya' +import type { CodegemmaModelInputModalitiesByName } from './meta/model-meta-codegemma' +import type { CodellamaModelInputModalitiesByName } from './meta/model-meta-codellama' +import type { CommandRModelInputModalitiesByName } from './meta/model-meta-command-r' +import type { CommandRPlusModelInputModalitiesByName } from './meta/model-meta-command-r-plus' +import type { CommandR7bModelInputModalitiesByName } from './meta/model-meta-command-r7b' +import type { DeepseekCoderV2ModelInputModalitiesByName } from './meta/model-meta-deepseek-coder-v2' +import type { DeepseekOcrModelInputModalitiesByName } from './meta/model-meta-deepseek-ocr' +import type { DeepseekR1ModelInputModalitiesByName } from './meta/model-meta-deepseek-r1' +import type { Deepseekv3_1ModelInputModalitiesByName } from './meta/model-meta-deepseek-v3.1' +import type { DevstralModelInputModalitiesByName } from './meta/model-meta-devstral' +import type { Dolphin3ModelInputModalitiesByName } from './meta/model-meta-dolphin3' +import type { Exaone3_5ModelInputModalitiesByName } from './meta/model-meta-exaone3.5' +import type { Falcon2ModelInputModalitiesByName } from './meta/model-meta-falcon2' +import type { Falcon3ModelInputModalitiesByName } from './meta/model-meta-falcon3' +import type { Firefunction_V2ModelInputModalitiesByName } from './meta/model-meta-firefunction-v2' +import type { GemmaModelInputModalitiesByName } from './meta/model-meta-gemma' +import type { Gemma2ModelInputModalitiesByName } from './meta/model-meta-gemma2' +import type { Gemma3ModelInputModalitiesByName } from './meta/model-meta-gemma3' +import type { Granite3DenseModelInputModalitiesByName } from './meta/model-meta-granite3-dense' +import type { Granite3GuardianModelInputModalitiesByName } from './meta/model-meta-granite3-guardian' +import type { Granite3MoeModelInputModalitiesByName } from './meta/model-meta-granite3-moe' +import type { Granite3_1DenseModelInputModalitiesByName } from './meta/model-meta-granite3.1-dense' +import type { Granite3_1MoeModelInputModalitiesByName } from './meta/model-meta-granite3.1-moe' +import type { LlamaGuard3ModelInputModalitiesByName } from './meta/model-meta-llama-guard3' +import type { Llama2ModelInputModalitiesByName } from './meta/model-meta-llama2' +import type { Llama3ModelInputModalitiesByName } from './meta/model-meta-llama3' +import type { Llama3ChatQaModelInputModalitiesByName } from './meta/model-meta-llama3-chatqa' +import type { Llama3GradientModelInputModalitiesByName } from './meta/model-meta-llama3-gradient' +import type { Llama3_1ModelInputModalitiesByName } from './meta/model-meta-llama3.1' +import type { Llama3_2ModelInputModalitiesByName } from './meta/model-meta-llama3.2' 
+import type { Llama3_2VisionModelInputModalitiesByName } from './meta/model-meta-llama3.2-vision' +import type { Llama3_3ModelInputModalitiesByName } from './meta/model-meta-llama3.3' +import type { Llama3_4ModelInputModalitiesByName } from './meta/model-meta-llama4' +import type { llavaModelInputModalitiesByName } from './meta/model-meta-llava' +import type { LlavaLlamaModelInputModalitiesByName } from './meta/model-meta-llava-llama3' +import type { LlavaPhi3ModelInputModalitiesByName } from './meta/model-meta-llava-phi3' +import type { MarcoO1ModelInputModalitiesByName } from './meta/model-meta-marco-o1' +import type { MistralModelInputModalitiesByName } from './meta/model-meta-mistral' +import type { MistralLargeModelInputModalitiesByName } from './meta/model-meta-mistral-large' +import type { MistralNemoModelInputModalitiesByName } from './meta/model-meta-mistral-nemo' +import type { MistralSmallModelInputModalitiesByName } from './meta/model-meta-mistral-small' +import type { MixtralModelInputModalitiesByName } from './meta/model-meta-mixtral' +import type { MoondreamModelInputModalitiesByName } from './meta/model-meta-moondream' +import type { NemotronModelInputModalitiesByName } from './meta/model-meta-nemotron' +import type { NemotronMiniModelInputModalitiesByName } from './meta/model-meta-nemotron-mini' +import type { Olmo2ModelInputModalitiesByName } from './meta/model-meta-olmo2' +import type { OpencoderModelInputModalitiesByName } from './meta/model-meta-opencoder' +import type { OpenhermesModelInputModalitiesByName } from './meta/model-meta-openhermes' +import type { Phi3ModelInputModalitiesByName } from './meta/model-meta-phi3' +import type { Phi4ModelInputModalitiesByName } from './meta/model-meta-phi4' +import type { QwenModelInputModalitiesByName } from './meta/model-meta-qwen' +import type { Qwen2ModelInputModalitiesByName } from './meta/model-meta-qwen2' +import type { Qwen2_5ModelInputModalitiesByName } from './meta/model-meta-qwen2.5' +import type { Qwen2_5CoderModelInputModalitiesByName } from './meta/model-meta-qwen2.5-coder' +import type { Qwen3ModelInputModalitiesByName } from './meta/model-meta-qwen3' +import type { QwqModelInputModalitiesByName } from './meta/model-meta-qwq' +import type { Sailor2ModelInputModalitiesByName } from './meta/model-meta-sailor2' +import type { ShieldgemmaModelInputModalitiesByName } from './meta/model-meta-shieldgemma' +import type { SmalltinkerModelInputModalitiesByName } from './meta/model-meta-smalltinker' +import type { SmollmModelInputModalitiesByName } from './meta/model-meta-smollm' +import type { TinnyllamaModelInputModalitiesByName } from './meta/model-meta-tinyllama' +import type { Tulu3ModelInputModalitiesByName } from './meta/model-meta-tulu3' + +export const OLLAMA_MODELS = [ + ...ATHENE_MODELS, + ...AYA_MODELS, + ...CODEGEMMA_MODELS, + ...CODELLAMA_MODELS, + ...COMMAND_R_PLUS_MODELS, + ...COMMAND_R_MODELS, + ...COMMAND_R_7b_MODELS, + ...DEEPSEEK_CODER_V2_MODELS, + ...DEEPSEEK_OCR_MODELS, + ...DEEPSEEK_R1_MODELS, + ...DEEPSEEK_V3_1_MODELS, + ...DEVSTRAL_MODELS, + ...DOLPHIN3_MODELS, + ...EXAONE3_5MODELS, + ...FALCON2_MODELS, + ...FALCON3_MODELS, + ...FIREFUNCTION_V2_MODELS, + ...GEMMA_MODELS, + ...GEMMA2_MODELS, + ...GEMMA3_MODELS, + ...GRANITE3_DENSE_MODELS, + ...GRANITE3_GUARDIAN_MODELS, + ...GRANITE3_MOE_MODELS, + ...GRANITE3_1_DENSE_MODELS, + ...GRANITE3_1_MOE_MODELS, + ...LLAMA_GUARD3_MODELS, + ...LLAMA2_MODELS, + ...LLAMA3_CHATQA_MODELS, + ...LLAMA3_GRADIENT_MODELS, + ...LLAMA3_1_MODELS, + ...LLAMA3_2_MODELS, + 
  ...LLAMA3_2_VISION_MODELS,
+  ...LLAMA3_3_MODELS,
+  ...LLAMA3_MODELS,
+  ...LLAMA4_MODELS,
+  ...LLAVA_LLAMA3_MODELS,
+  ...LLAVA_PHI3_MODELS,
+  ...LLAVA_MODELS,
+  ...MARCO_O1_MODELS,
+  ...MISTRAL_LARGE_MODELS,
+  ...MISTRAL_NEMO_MODELS,
+  ...MISTRAL_SMALL_MODELS,
+  ...MISTRAL_MODELS,
+  ...MIXTRAL_MODELS,
+  ...MOONDREAM_MODELS,
+  ...NEMOTRON_MINI_MODELS,
+  ...NEMOTRON_MODELS,
+  ...OLMO2_MODELS,
+  ...OPENCODER_MODELS,
+  ...OPENHERMES_MODELS,
+  ...PHI3_MODELS,
+  ...PHI4_MODELS,
+  ...QWEN_MODELS,
+  ...QWEN2_5_CODER_MODELS,
+  ...QWEN2_5_MODELS,
+  ...QWEN2_MODELS,
+  ...QWEN3_MODELS,
+  ...QWQ_MODELS,
+  ...SAILOR2_MODELS,
+  ...SHIELDGEMMA_MODELS,
+  ...SMALLTINKER_MODELS,
+  ...SMOLLM_MODELS,
+  ...TINNYLLAMA_MODELS,
+  ...TULU3_MODELS,
+] as const
+
+export type OllamaModelInputModalitiesByName =
+  AtheneModelInputModalitiesByName &
+    AyaModelInputModalitiesByName &
+    CodegemmaModelInputModalitiesByName &
+    CodellamaModelInputModalitiesByName &
+    CommandRPlusModelInputModalitiesByName &
+    CommandRModelInputModalitiesByName &
+    CommandR7bModelInputModalitiesByName &
+    DeepseekCoderV2ModelInputModalitiesByName &
+    DeepseekOcrModelInputModalitiesByName &
+    DeepseekR1ModelInputModalitiesByName &
+    Deepseekv3_1ModelInputModalitiesByName &
+    DevstralModelInputModalitiesByName &
+    Dolphin3ModelInputModalitiesByName &
+    Exaone3_5ModelInputModalitiesByName &
+    Falcon2ModelInputModalitiesByName &
+    Falcon3ModelInputModalitiesByName &
+    Firefunction_V2ModelInputModalitiesByName &
+    GemmaModelInputModalitiesByName &
+    Gemma2ModelInputModalitiesByName &
+    Gemma3ModelInputModalitiesByName &
+    Granite3DenseModelInputModalitiesByName &
+    Granite3GuardianModelInputModalitiesByName &
+    Granite3MoeModelInputModalitiesByName &
+    Granite3_1DenseModelInputModalitiesByName &
+    Granite3_1MoeModelInputModalitiesByName &
+    LlamaGuard3ModelInputModalitiesByName &
+    Llama2ModelInputModalitiesByName &
+    Llama3ChatQaModelInputModalitiesByName &
+    Llama3GradientModelInputModalitiesByName &
+    Llama3_1ModelInputModalitiesByName &
+    Llama3_2VisionModelInputModalitiesByName &
+    Llama3_2ModelInputModalitiesByName &
+    Llama3_3ModelInputModalitiesByName &
+    Llama3ModelInputModalitiesByName &
+    Llama3_4ModelInputModalitiesByName &
+    LlavaLlamaModelInputModalitiesByName &
+    LlavaPhi3ModelInputModalitiesByName &
+    llavaModelInputModalitiesByName &
+    MarcoO1ModelInputModalitiesByName &
+    MistralLargeModelInputModalitiesByName &
+    MistralNemoModelInputModalitiesByName &
+    MistralSmallModelInputModalitiesByName &
+    MistralModelInputModalitiesByName &
+    MixtralModelInputModalitiesByName &
+    MoondreamModelInputModalitiesByName &
+    NemotronMiniModelInputModalitiesByName &
+    NemotronModelInputModalitiesByName &
+    Olmo2ModelInputModalitiesByName &
+    OpencoderModelInputModalitiesByName &
+    OpenhermesModelInputModalitiesByName &
+    Phi3ModelInputModalitiesByName &
+    Phi4ModelInputModalitiesByName &
+    QwenModelInputModalitiesByName &
+    Qwen2_5CoderModelInputModalitiesByName &
+    Qwen2_5ModelInputModalitiesByName &
+    Qwen2ModelInputModalitiesByName &
+    Qwen3ModelInputModalitiesByName &
+    QwqModelInputModalitiesByName &
+    Sailor2ModelInputModalitiesByName &
+    ShieldgemmaModelInputModalitiesByName &
+    SmalltinkerModelInputModalitiesByName &
+    SmollmModelInputModalitiesByName &
+    TinnyllamaModelInputModalitiesByName &
+    Tulu3ModelInputModalitiesByName
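Since OllamaModelInputModalitiesByName is an intersection of every per-family map, a single indexed lookup works across all families. A sketch of how that resolves (the helper alias is invented):

```typescript
import { OLLAMA_MODELS } from './model-meta'
import type { OllamaModelInputModalitiesByName } from './model-meta'

// Hypothetical helper: the input modalities for any known model tag.
type InputsOf<M extends (typeof OLLAMA_MODELS)[number]> =
  M extends keyof OllamaModelInputModalitiesByName
    ? OllamaModelInputModalitiesByName[M]
    : never

type Qwen3Inputs = InputsOf<'qwen3:8b'> // readonly ['text']
```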
diff --git a/packages/typescript/ai-ollama/src/ollama-adapter.ts b/packages/typescript/ai-ollama/src/ollama-adapter.ts
new file mode 100644
index 00000000..0c457861
--- /dev/null
+++ b/packages/typescript/ai-ollama/src/ollama-adapter.ts
@@ -0,0 +1,481 @@
+import { Ollama as OllamaSDK } from 'ollama'
+import { BaseAdapter, convertZodToJsonSchema } from '@tanstack/ai'
+
+import { OLLAMA_MODELS } from './model-meta'
+
+import type { OllamaModelInputModalitiesByName } from './model-meta'
+import type {
+  AbortableAsyncIterator,
+  ChatRequest,
+  ChatResponse,
+  Message,
+  Tool as OllamaTool,
+  ToolCall,
+} from 'ollama'
+import type {
+  ChatOptions,
+  DefaultMessageMetadataByModality,
+  EmbeddingOptions,
+  EmbeddingResult,
+  StreamChunk,
+  SummarizationOptions,
+  SummarizationResult,
+  Tool,
+} from '@tanstack/ai'
+
+export interface OllamaConfig {
+  host?: string
+}
+
+const OLLAMA_EMBEDDING_MODELS = [] as const
+
+/**
+ * Type-only map from Ollama model name to its provider-specific options.
+ * Ollama models share the same options interface.
+ */
+export type OllamaChatModelProviderOptionsByName = {
+  [K in (typeof OLLAMA_MODELS)[number]]: OllamaProviderOptions
+}
+
+/**
+ * Ollama-specific provider options
+ * Based on Ollama API options
+ * @see https://github.com/ollama/ollama/blob/main/docs/api.md
+ */
+export interface OllamaProviderOptions {
+  /** Number of tokens to keep from the prompt */
+  num_keep?: number
+  /** Number of tokens from context to consider for next token prediction */
+  top_k?: number
+  /** Minimum probability for nucleus sampling */
+  min_p?: number
+  /** Tail-free sampling parameter */
+  tfs_z?: number
+  /** Typical probability sampling parameter */
+  typical_p?: number
+  /** Number of previous tokens to consider for repetition penalty */
+  repeat_last_n?: number
+  /** Penalty for repeating tokens */
+  repeat_penalty?: number
+  /** Enable Mirostat sampling (0=disabled, 1=Mirostat, 2=Mirostat 2.0) */
+  mirostat?: number
+  /** Target entropy for Mirostat */
+  mirostat_tau?: number
+  /** Learning rate for Mirostat */
+  mirostat_eta?: number
+  /** Enable penalize_newline */
+  penalize_newline?: boolean
+  /** Enable NUMA support */
+  numa?: boolean
+  /** Context window size */
+  num_ctx?: number
+  /** Batch size for prompt processing */
+  num_batch?: number
+  /** Number of GQA groups (for some models) */
+  num_gqa?: number
+  /** Number of GPU layers to use */
+  num_gpu?: number
+  /** GPU to use for inference */
+  main_gpu?: number
+  /** Use memory-mapped model */
+  use_mmap?: boolean
+  /** Use memory-locked model */
+  use_mlock?: boolean
+  /** Number of threads to use */
+  num_thread?: number
+}
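A sketch of what these knobs look like from the caller's side; the values are illustrative only, and `mapCommonOptionsToOllama()`, defined later in this file, spreads them into the request's `options` field alongside the normalized temperature/top_p/maxTokens:

```typescript
const providerOptions: OllamaProviderOptions = {
  num_ctx: 8_192,      // widen the context window for this request
  repeat_penalty: 1.1, // mildly discourage repeated tokens
  num_gpu: 0,          // force CPU-only inference
}
```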
+
+export class Ollama extends BaseAdapter<
+  typeof OLLAMA_MODELS,
+  typeof OLLAMA_EMBEDDING_MODELS,
+  OllamaProviderOptions,
+  Record<string, unknown>,
+  OllamaChatModelProviderOptionsByName,
+  OllamaModelInputModalitiesByName,
+  DefaultMessageMetadataByModality
+> {
+  name = 'ollama' as const
+  models = OLLAMA_MODELS
+  embeddingModels = OLLAMA_EMBEDDING_MODELS
+
+  // Type-only map used by core AI to infer per-model provider options.
+  // This is never set at runtime; it exists purely for TypeScript.
+  declare _modelProviderOptionsByName: OllamaChatModelProviderOptionsByName
+  // Type-only map for model input modalities; used for multimodal content type constraints
+  declare _modelInputModalitiesByName: OllamaModelInputModalitiesByName
+  // Type-only map for message metadata types; used for type-safe metadata autocomplete
+  declare _messageMetadataByModality: DefaultMessageMetadataByModality
+
+  private client: OllamaSDK
+
+  constructor(config: OllamaConfig = {}) {
+    super({})
+    this.client = new OllamaSDK({
+      host: config.host || 'http://localhost:11434',
+    })
+  }
+
+  async *chatStream(options: ChatOptions): AsyncIterable<StreamChunk> {
+    // Use stream converter for now
+    // Map common options to Ollama format
+    const mappedOptions = this.mapCommonOptionsToOllama(options)
+    const response = await this.client.chat({
+      ...mappedOptions,
+      stream: true,
+    })
+    yield* this.processOllamaStreamChunks(response)
+  }
+
+  async summarize(options: SummarizationOptions): Promise<SummarizationResult> {
+    const prompt = this.buildSummarizationPrompt(options, options.text)
+
+    const response = await this.client.generate({
+      model: options.model || 'llama2',
+      prompt,
+      options: {
+        temperature: 0.3,
+        num_predict: options.maxLength || 500,
+      },
+      stream: false,
+    })
+
+    const promptTokens = this.estimateTokens(prompt)
+    const completionTokens = this.estimateTokens(response.response)
+
+    return {
+      id: this.generateId(),
+      model: response.model,
+      summary: response.response,
+      usage: {
+        promptTokens,
+        completionTokens,
+        totalTokens: promptTokens + completionTokens,
+      },
+    }
+  }
+
+  async createEmbeddings(options: EmbeddingOptions): Promise<EmbeddingResult> {
+    const inputs = Array.isArray(options.input)
+      ? options.input
+      : [options.input]
+    const embeddings: Array<Array<number>> = []
+
+    for (const input of inputs) {
+      const response = await this.client.embeddings({
+        model: options.model || 'nomic-embed-text',
+        prompt: input,
+      })
+      embeddings.push(response.embedding)
+    }
+
+    const promptTokens = inputs.reduce(
+      (sum, input) => sum + this.estimateTokens(input),
+      0,
+    )
+
+    return {
+      id: this.generateId(),
+      model: options.model || 'nomic-embed-text',
+      embeddings,
+      usage: {
+        promptTokens,
+        totalTokens: promptTokens,
+      },
+    }
+  }
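A usage sketch for the two non-streaming entry points above (inside an async function; the model tags and input strings are placeholders):

```typescript
const adapter = new Ollama({ host: 'http://localhost:11434' })

const result = await adapter.summarize({
  model: 'llama3.2:3b', // any pulled Ollama tag
  text: 'long document text...',
  style: 'bullet-points',
  maxLength: 300,
})

const { embeddings, usage } = await adapter.createEmbeddings({
  model: 'nomic-embed-text',
  input: ['first passage', 'second passage'], // embedded one request per item
})
```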
+
+  private buildSummarizationPrompt(
+    options: SummarizationOptions,
+    text: string,
+  ): string {
+    let prompt = 'You are a professional summarizer. '
+
+    switch (options.style) {
+      case 'bullet-points':
+        prompt += 'Provide a summary in bullet point format. '
+        break
+      case 'paragraph':
+        prompt += 'Provide a summary in paragraph format. '
+        break
+      case 'concise':
+        prompt += 'Provide a very concise summary in 1-2 sentences. '
+        break
+      default:
+        prompt += 'Provide a clear and concise summary. '
+    }
+
+    if (options.focus && options.focus.length > 0) {
+      prompt += `Focus on the following aspects: ${options.focus.join(', ')}. `
+    }
+
+    prompt += `\n\nText to summarize:\n${text}\n\nSummary:`
+
+    return prompt
+  }
+
+  private estimateTokens(text: string): number {
+    // Rough approximation: 1 token ≈ 4 characters
+    return Math.ceil(text.length / 4)
+  }
+
+  private async *processOllamaStreamChunks(
+    stream: AbortableAsyncIterator<ChatResponse>,
+  ): AsyncIterable<StreamChunk> {
+    let accumulatedContent = ''
+    const timestamp = Date.now()
+    const responseId: string = this.generateId()
+    let accumulatedReasoning = ''
+    let hasEmittedToolCalls = false
+    for await (const chunk of stream) {
+      function handleToolCall(toolCall: ToolCall): StreamChunk {
+        // we cast because the library types are missing id and index
+        const actualToolCall = toolCall as ToolCall & {
+          id: string
+          function: { index: number }
+        }
+        return {
+          type: 'tool_call',
+          id: responseId,
+          model: chunk.model,
+          timestamp,
+          toolCall: {
+            type: 'function',
+            id: actualToolCall.id,
+            function: {
+              name: actualToolCall.function.name || '',
+              arguments:
+                typeof actualToolCall.function.arguments === 'string'
+                  ? actualToolCall.function.arguments
+                  : JSON.stringify(actualToolCall.function.arguments),
+            },
+          },
+          index: actualToolCall.function.index,
+        }
+      }
+      if (chunk.done) {
+        if (chunk.message.tool_calls && chunk.message.tool_calls.length > 0) {
+          for (const toolCall of chunk.message.tool_calls) {
+            yield handleToolCall(toolCall)
+            hasEmittedToolCalls = true
+          }
+          yield {
+            type: 'done',
+            id: responseId || this.generateId(),
+            model: chunk.model,
+            timestamp,
+            finishReason: 'tool_calls',
+          }
+          continue
+        }
+        yield {
+          type: 'done',
+          id: responseId || this.generateId(),
+          model: chunk.model,
+          timestamp,
+          finishReason: hasEmittedToolCalls ? 'tool_calls' : 'stop',
+        }
+        continue
+      }
+      if (chunk.message.content) {
+        accumulatedContent += chunk.message.content
+        yield {
+          type: 'content',
+          id: responseId || this.generateId(),
+          model: chunk.model,
+          timestamp,
+          delta: chunk.message.content,
+          content: accumulatedContent,
+          role: 'assistant',
+        }
+      }
+
+      if (chunk.message.tool_calls && chunk.message.tool_calls.length > 0) {
+        for (const toolCall of chunk.message.tool_calls) {
+          yield handleToolCall(toolCall)
+          hasEmittedToolCalls = true
+        }
+      }
+      if (chunk.message.thinking) {
+        accumulatedReasoning += chunk.message.thinking
+        yield {
+          type: 'thinking',
+          id: responseId || this.generateId(),
+          model: chunk.model,
+          timestamp,
+          content: accumulatedReasoning,
+          delta: chunk.message.thinking,
+        }
+      }
+    }
+  }
+
+  /**
+   * Converts standard Tool format to Ollama-specific tool format
+   * Ollama uses OpenAI-compatible tool format
+   */
+  private convertToolsToOllamaFormat(
+    tools?: Array<Tool>,
+  ): Array<OllamaTool> | undefined {
+    if (!tools || tools.length === 0) {
+      return undefined
+    }
+
+    return tools.map((tool) => ({
+      type: 'function',
+      function: {
+        name: tool.name,
+        description: tool.description,
+        parameters: convertZodToJsonSchema(tool.inputSchema),
+      },
+    }))
+  }
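The chunk protocol emitted above is easiest to read from the consumer side. A sketch (inside an async function; the message shape is assumed to follow @tanstack/ai's ChatOptions):

```typescript
const ollamaAdapter = new Ollama()

for await (const chunk of ollamaAdapter.chatStream({
  model: 'qwen3:8b',
  messages: [{ role: 'user', content: 'Hello!' }],
})) {
  switch (chunk.type) {
    case 'content':
      process.stdout.write(chunk.delta) // incremental assistant text
      break
    case 'thinking':
      break // chunk.delta carries incremental reasoning tokens
    case 'tool_call':
      break // chunk.toolCall holds the id/name/arguments built in handleToolCall
    case 'done':
      break // chunk.finishReason is 'stop' or 'tool_calls'
  }
}
```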
+
+  /**
+   * Formats messages for Ollama, handling tool calls, tool results, and multimodal content
+   */
+  private formatMessages(messages: ChatOptions['messages']): Array<Message> {
+    return messages.map((msg) => {
+      let textContent = ''
+      const images: Array<string> = []
+
+      // Handle multimodal content
+      if (Array.isArray(msg.content)) {
+        for (const part of msg.content) {
+          if (part.type === 'text') {
+            textContent += part.content
+          } else if (part.type === 'image') {
+            // Ollama accepts base64 strings for images
+            if (part.source.type === 'data') {
+              images.push(part.source.value)
+            } else {
+              // URL-based images not directly supported, but we pass the URL
+              // Ollama may need the image to be fetched externally
+              images.push(part.source.value)
+            }
+          }
+          // Ollama doesn't support audio/video/document directly, skip them
+        }
+      } else {
+        textContent = msg.content || ''
+      }
+
+      const hasToolCallId = msg.role === 'tool' && msg.toolCallId
+      return {
+        role: hasToolCallId ? 'tool' : msg.role,
+        content: hasToolCallId
+          ? typeof msg.content === 'string'
+            ? msg.content
+            : JSON.stringify(msg.content)
+          : textContent,
+        // Add images if present
+        ...(images.length > 0 ? { images: images } : {}),
+        ...(msg.role === 'assistant' &&
+        msg.toolCalls &&
+        msg.toolCalls.length > 0
+          ? {
+              tool_calls: msg.toolCalls.map((toolCall) => {
+                // Parse string arguments to object for Ollama
+                let parsedArguments = {}
+                if (typeof toolCall.function.arguments === 'string') {
+                  try {
+                    parsedArguments = JSON.parse(toolCall.function.arguments)
+                  } catch {
+                    parsedArguments = {}
+                  }
+                } else {
+                  parsedArguments = toolCall.function.arguments
+                }
+
+                return {
+                  id: toolCall.id,
+                  type: toolCall.type,
+                  function: {
+                    name: toolCall.function.name,
+                    arguments: parsedArguments,
+                  },
+                }
+              }),
+            }
+          : {}),
+      }
+    })
+  }
+
+  /**
+   * Maps common options to Ollama-specific format
+   * Handles translation of normalized options to Ollama's API format
+   */
+  private mapCommonOptionsToOllama(options: ChatOptions): ChatRequest {
+    const providerOptions = options.providerOptions as
+      | OllamaProviderOptions
+      | undefined
+    const ollamaOptions = {
+      temperature: options.options?.temperature,
+      top_p: options.options?.topP,
+      num_predict: options.options?.maxTokens,
+      ...providerOptions,
+    }
+
+    return {
+      model: options.model,
+      options: ollamaOptions,
+      messages: this.formatMessages(options.messages),
+      tools: this.convertToolsToOllamaFormat(options.tools),
+    }
+  }
+}
+
+/**
+ * Creates an Ollama adapter with simplified configuration
+ * @param host - Optional Ollama server host (defaults to http://localhost:11434)
+ * @returns A fully configured Ollama adapter instance
+ *
+ * @example
+ * ```typescript
+ * const ollama = createOllama();
+ * // or with custom host
+ * const ollama = createOllama("http://localhost:11434");
+ *
+ * const ai = new AI({
+ *   adapters: {
+ *     ollama,
+ *   }
+ * });
+ * ```
+ */
+export function createOllama(
+  host?: string,
+  config?: Omit<OllamaConfig, 'host'>,
+): Ollama {
+  return new Ollama({ host, ...config })
+}
+
+/**
+ * Create an Ollama adapter with automatic host detection from environment variables.
+ *
+ * Looks for `OLLAMA_HOST` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * Falls back to default Ollama host if not found.
+ *
+ * @param config - Optional configuration (excluding host which is auto-detected)
+ * @returns Configured Ollama adapter instance
+ *
+ * @example
+ * ```typescript
+ * // Automatically uses OLLAMA_HOST from environment or defaults to http://localhost:11434
+ * const aiInstance = ai(ollama());
+ * ```
+ */
+export function ollama(config?: Omit<OllamaConfig, 'host'>): Ollama {
+  const env =
+    typeof globalThis !== 'undefined' && (globalThis as any).window?.env
+      ? (globalThis as any).window.env
+      : typeof process !== 'undefined'
+        ?
process.env + : undefined + const host = env?.OLLAMA_HOST + + return createOllama(host, config) +} From 156e0e61f6d4824a275692c8c7bcc2d1a055ec80 Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Mon, 22 Dec 2025 11:52:55 +0100 Subject: [PATCH 2/7] stash --- .../ai-ollama/src/meta/model-meta-athene.ts | 33 ++++- .../ai-ollama/src/meta/model-meta-aya.ts | 27 +++- .../src/meta/model-meta-codegemma.ts | 31 +++- .../src/meta/model-meta-codellama.ts | 47 ++++-- .../src/meta/model-meta-command-r-plus.ts | 33 ++++- .../src/meta/model-meta-command-r.ts | 33 ++++- .../src/meta/model-meta-command-r7b.ts | 33 ++++- .../src/meta/model-meta-deepseek-coder-v2.ts | 31 +++- .../src/meta/model-meta-deepseek-ocr.ts | 24 ++- .../src/meta/model-meta-deepseek-r1.ts | 109 ++++++++++++-- .../src/meta/model-meta-deepseek-v3.1.ts | 52 ++++++- .../ai-ollama/src/meta/model-meta-devstral.ts | 33 ++++- .../ai-ollama/src/meta/model-meta-dolphin3.ts | 23 ++- .../src/meta/model-meta-exaone3.5.ts | 39 +++-- .../ai-ollama/src/meta/model-meta-falcon2.ts | 23 ++- .../ai-ollama/src/meta/model-meta-falcon3.ts | 47 ++++-- .../src/meta/model-meta-firefunction-v2.ts | 23 ++- .../ai-ollama/src/meta/model-meta-gemma.ts | 31 +++- .../ai-ollama/src/meta/model-meta-gemma2.ts | 39 +++-- .../ai-ollama/src/meta/model-meta-gemma3.ts | 56 +++++-- .../src/meta/model-meta-granite3-dense.ts | 31 +++- .../src/meta/model-meta-granite3-guardian.ts | 31 +++- .../src/meta/model-meta-granite3-moe.ts | 45 +++++- .../src/meta/model-meta-granite3.1-dense.ts | 45 +++++- .../src/meta/model-meta-granite3.1-moe.ts | 45 +++++- .../src/meta/model-meta-llama-guard3.ts | 31 +++- .../ai-ollama/src/meta/model-meta-llama2.ts | 39 +++-- .../src/meta/model-meta-llama3-chatqa.ts | 29 +++- .../src/meta/model-meta-llama3-gradient.ts | 29 +++- .../ai-ollama/src/meta/model-meta-llama3.1.ts | 57 ++++++-- .../src/meta/model-meta-llama3.2-vision.ts | 32 +++- .../ai-ollama/src/meta/model-meta-llama3.2.ts | 45 +++++- .../ai-ollama/src/meta/model-meta-llama3.3.ts | 33 ++++- .../ai-ollama/src/meta/model-meta-llama3.ts | 31 +++- .../ai-ollama/src/meta/model-meta-llama4.ts | 45 +++++- .../src/meta/model-meta-llava-llama3.ts | 30 +++- .../src/meta/model-meta-llava-phi3.ts | 24 ++- .../ai-ollama/src/meta/model-meta-llava.ts | 40 +++-- .../ai-ollama/src/meta/model-meta-marco-o1.ts | 23 ++- .../src/meta/model-meta-mistral-large.ts | 33 ++++- .../src/meta/model-meta-mistral-nemo.ts | 33 ++++- .../src/meta/model-meta-mistral-small.ts | 45 +++++- .../ai-ollama/src/meta/model-meta-mistral.ts | 24 ++- .../ai-ollama/src/meta/model-meta-mixtral.ts | 45 +++++- .../src/meta/model-meta-moondream.ts | 24 ++- .../src/meta/model-meta-nemotron-mini.ts | 33 ++++- .../ai-ollama/src/meta/model-meta-nemotron.ts | 33 ++++- .../ai-ollama/src/meta/model-meta-olmo2.ts | 31 +++- .../src/meta/model-meta-opencoder.ts | 31 +++- .../src/meta/model-meta-openhermes.ts | 31 +++- .../ai-ollama/src/meta/model-meta-phi3.ts | 31 +++- .../ai-ollama/src/meta/model-meta-phi4.ts | 23 ++- .../ai-ollama/src/meta/model-meta-qwen.ts | 75 +++++++--- .../src/meta/model-meta-qwen2.5-coder.ts | 93 ++++++++++-- .../ai-ollama/src/meta/model-meta-qwen2.5.ts | 93 ++++++++++-- .../ai-ollama/src/meta/model-meta-qwen2.ts | 73 ++++++++-- .../ai-ollama/src/meta/model-meta-qwen3.ts | 137 +++++++++++++++--- .../ai-ollama/src/meta/model-meta-qwq.ts | 33 ++++- .../ai-ollama/src/meta/model-meta-sailor2.ts | 40 +++-- .../src/meta/model-meta-shieldgemma.ts | 39 +++-- .../src/meta/model-meta-smalltinker.ts | 23 ++- 
.../ai-ollama/src/meta/model-meta-smollm.ts | 39 +++-- .../src/meta/model-meta-tinyllama.ts | 23 ++- .../ai-ollama/src/meta/model-meta-tulu3.ts | 31 +++- .../ai-ollama/src/meta/models-meta.ts | 85 ++++++++++- .../ai-ollama/src/ollama-adapter.ts | 1 + 66 files changed, 2104 insertions(+), 550 deletions(-) diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts index 5e10c76a..de2820c9 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const ATHENE_V2_LATEST = { name: 'athene-v2:latest', @@ -10,7 +15,11 @@ const ATHENE_V2_LATEST = { }, size: '47gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const ATHENE_V2_72b = { name: 'athene-v2:72b', @@ -21,7 +30,11 @@ const ATHENE_V2_72b = { }, size: '47gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const ATHENE_MODELS = [ ATHENE_V2_LATEST.name, @@ -41,8 +54,16 @@ export const ATHENE_MODELS = [ // Manual type map for per-model provider options export type AtheneChatModelProviderOptionsByName = { // Models with thinking and structured output support - [ATHENE_V2_LATEST.name]: ChatRequest - [ATHENE_V2_72b.name]: ChatRequest + [ATHENE_V2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [ATHENE_V2_72b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type AtheneModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts index 38a93989..c4f6f070 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const AYA_LATEST = { name: 'aya:latest', @@ -10,7 +13,9 @@ const AYA_LATEST = { }, size: '4.8gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const AYA_8b = { name: 'aya:8b', @@ -21,7 +26,9 @@ const AYA_8b = { }, size: '4.8gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const AYA_35b = { name: 'aya:35b', @@ -32,7 +39,9 @@ const AYA_35b = { }, size: '20gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const @@ -49,9 +58,11 @@ export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const // Manual type map for per-model 
provider options export type AyaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [AYA_LATEST.name]: ChatRequest - [AYA_8b.name]: ChatRequest - [AYA_35b.name]: ChatRequest + [AYA_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [AYA_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [AYA_35b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type AyaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts index f424c512..0ef618a8 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const CODEGEMMA_LATEST = { name: 'codegemma:latest', @@ -10,7 +13,9 @@ const CODEGEMMA_LATEST = { }, size: '5gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const CODEGEMMA_2b = { name: 'codegemma:2b', @@ -21,7 +26,9 @@ const CODEGEMMA_2b = { }, size: '1.65gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const CODEGEMMA_7b = { name: 'codegemma:7b', @@ -32,7 +39,9 @@ const CODEGEMMA_7b = { }, size: '5gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const CODEGEMMA_MODELS = [ CODEGEMMA_LATEST.name, @@ -53,9 +62,15 @@ export const CODEGEMMA_MODELS = [ // Manual type map for per-model provider options export type CodegemmaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [CODEGEMMA_LATEST.name]: ChatRequest - [CODEGEMMA_2b.name]: ChatRequest - [CODEGEMMA_7b.name]: ChatRequest + [CODEGEMMA_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [CODEGEMMA_2b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [CODEGEMMA_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type CodegemmaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts index df9a7786..3458c73b 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const CODELLAMA_LATEST = { name: 'codellama:latest', @@ -10,7 +13,9 @@ const CODELLAMA_LATEST = { }, size: '3.8gb', context: 16_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const CODELLAMA_7b = { name: 'codellama:7b', @@ -21,7 +26,9 @@ const CODELLAMA_7b = { }, size: '3.8gb', context: 16_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const CODELLAMA_13b = { name:
'codellama:13b', @@ -32,7 +39,9 @@ const CODELLAMA_13b = { }, size: '7.4gb', context: 16_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const CODELLAMA_34b = { name: 'codellama:34b', @@ -43,7 +52,9 @@ const CODELLAMA_34b = { }, size: '19gb', context: 16_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const CODELLAMA_70b = { name: 'codellama:70b', @@ -54,7 +65,9 @@ const CODELLAMA_70b = { }, size: '39gb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const CODELLAMA_MODELS = [ CODELLAMA_LATEST.name, @@ -77,11 +90,21 @@ export const CODELLAMA_MODELS = [ // Manual type map for per-model provider options export type CodellamaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [CODELLAMA_LATEST.name]: ChatRequest - [CODELLAMA_7b.name]: ChatRequest - [CODELLAMA_13b.name]: ChatRequest - [CODELLAMA_34b.name]: ChatRequest - [CODELLAMA_70b.name]: ChatRequest + [CODELLAMA_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [CODELLAMA_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [CODELLAMA_13b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [CODELLAMA_34b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [CODELLAMA_70b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type CodellamaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts index 84971561..ebecf4f6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const COMMAND_R_PLUS_LATEST = { name: 'command-r-plus:latest', @@ -10,7 +15,11 @@ const COMMAND_R_PLUS_LATEST = { }, size: '59gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const COMMAND_R_PLUS_104b = { name: 'command-r-plus:104b', @@ -21,7 +30,11 @@ const COMMAND_R_PLUS_104b = { }, size: '59gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const COMMAND_R_PLUS_MODELS = [ COMMAND_R_PLUS_LATEST.name, @@ -41,8 +54,16 @@ export const COMMAND_R_PLUS_MODELS = [ // Manual type map for per-model provider options export type CommandRPlusChatModelProviderOptionsByName = { // Models with thinking and structured output support - [COMMAND_R_PLUS_LATEST.name]: ChatRequest - [COMMAND_R_PLUS_104b.name]: ChatRequest + [COMMAND_R_PLUS_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [COMMAND_R_PLUS_104b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + 
OllamaChatRequestTools + > } export type CommandRPlusModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts index a4f47e07..fc92dd12 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const COMMAND_R_LATEST = { name: 'command-r:latest', @@ -10,7 +15,11 @@ const COMMAND_R_LATEST = { }, size: '19gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const COMMAND_R_35b = { name: 'command-r:35b', @@ -21,7 +30,11 @@ const COMMAND_R_35b = { }, size: '19gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const COMMAND_R_MODELS = [ COMMAND_R_LATEST.name, @@ -41,8 +54,16 @@ export const COMMAND_R_MODELS = [ // Manual type map for per-model provider options export type CommandRChatModelProviderOptionsByName = { // Models with thinking and structured output support - [COMMAND_R_LATEST.name]: ChatRequest - [COMMAND_R_35b.name]: ChatRequest + [COMMAND_R_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [COMMAND_R_35b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type CommandRModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts index e510d404..55dbd7e0 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const COMMAND_R_7b_LATEST = { name: 'command-r7b:latest', @@ -10,7 +15,11 @@ const COMMAND_R_7b_LATEST = { }, size: '5.1gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const COMMAND_R_7b_7b = { name: 'command-r7b:7b', @@ -21,7 +30,11 @@ const COMMAND_R_7b_7b = { }, size: '5.1gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const COMMAND_R_7b_MODELS = [ COMMAND_R_7b_LATEST.name, @@ -41,8 +54,16 @@ export const COMMAND_R_7b_MODELS = [ // Manual type map for per-model provider options export type CommandR7bChatModelProviderOptionsByName = { // Models with thinking and structured output support - [COMMAND_R_7b_LATEST.name]: ChatRequest - [COMMAND_R_7b_7b.name]: ChatRequest + [COMMAND_R_7b_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages 
& + OllamaChatRequestTools + > + [COMMAND_R_7b_7b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type CommandR7bModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts index eada87a9..1f722652 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const DEEPSEEK_CODER_V2_LATEST = { name: 'deepseek-coder-v2:latest', @@ -10,7 +13,9 @@ const DEEPSEEK_CODER_V2_LATEST = { }, size: '4.8gb', context: 160_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const DEEPSEEK_CODER_V2_16b = { name: 'deepseek-coder-v2:16b', @@ -21,7 +26,9 @@ const DEEPSEEK_CODER_V2_16b = { }, size: '8.9gb', context: 160_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const DEEPSEEK_CODER_V2_236b = { name: 'deepseek-coder-v2:236b', @@ -32,7 +39,9 @@ const DEEPSEEK_CODER_V2_236b = { }, size: '133gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const DEEPSEEK_CODER_V2_MODELS = [ DEEPSEEK_CODER_V2_LATEST.name, @@ -53,9 +62,15 @@ export const DEEPSEEK_CODER_V2_MODELS = [ // Manual type map for per-model provider options export type DeepseekCoderV2ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [DEEPSEEK_CODER_V2_LATEST.name]: ChatRequest - [DEEPSEEK_CODER_V2_16b.name]: ChatRequest - [DEEPSEEK_CODER_V2_236b.name]: ChatRequest + [DEEPSEEK_CODER_V2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [DEEPSEEK_CODER_V2_16b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [DEEPSEEK_CODER_V2_236b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type DeepseekCoderV2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts index e3fa0d6c..dcfca427 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts @@ -1,5 +1,9 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaMessageImages, + OllamaModelMeta, +} from './models-meta' const DEEPSEEK_OCR_LATEST = { name: 'deepseek-ocr:latest', @@ -10,7 +14,9 @@ const DEEPSEEK_OCR_LATEST = { }, size: '6.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const DEEPSEEK_OCR_3b = { name: 'deepseek-ocr:3b', @@ -22,7 +28,9 @@ const DEEPSEEK_OCR_3b = { size: '6.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages
+> export const DEEPSEEK_OCR_MODELS = [ DEEPSEEK_OCR_LATEST.name, @@ -42,8 +50,12 @@ export const DEEPSEEK_OCR_MODELS = [ // Manual type map for per-model provider options export type DeepseekOcrChatModelProviderOptionsByName = { // Models with thinking and structured output support - [DEEPSEEK_OCR_LATEST.name]: ChatRequest - [DEEPSEEK_OCR_3b.name]: ChatRequest + [DEEPSEEK_OCR_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [DEEPSEEK_OCR_3b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type DeepseekOcrModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts index 470efd36..d6ad2d3f 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts @@ -1,5 +1,12 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestThinking, + OllamaChatRequestTools, + OllamaMessageThinking, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const DEEPSEEK_R1_LATEST = { name: 'deepseek-r1:latest', @@ -10,7 +17,12 @@ const DEEPSEEK_R1_LATEST = { }, size: '5.2gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const DEEPSEEK_R1_1_5b = { name: 'deepseek-r1:1.5b', @@ -21,7 +33,12 @@ const DEEPSEEK_R1_1_5b = { }, size: '1.1gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const DEEPSEEK_R1_7b = { name: 'deepseek-r1:7b', @@ -32,7 +49,12 @@ const DEEPSEEK_R1_7b = { }, size: '4.7gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const DEEPSEEK_R1_8b = { name: 'deepseek-r1:8b', @@ -43,7 +65,12 @@ const DEEPSEEK_R1_8b = { }, size: '5.2gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const DEEPSEEK_R1_32b = { name: 'deepseek-r1:32b', @@ -54,7 +81,12 @@ const DEEPSEEK_R1_32b = { }, size: '20gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const DEEPSEEK_R1_70b = { name: 'deepseek-r1:70b', @@ -65,7 +97,12 @@ const DEEPSEEK_R1_70b = { }, size: '43gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const DEEPSEEK_R1_671b = { name: 'deepseek-r1:671b', @@ -76,7 +113,12 @@ const DEEPSEEK_R1_671b = { }, size: '404gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> 
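/*
 * A minimal sketch of the capability-marker pattern this patch applies to every
 * model-meta file. Everything below is an illustrative assumption, not the real
 * './models-meta' definitions (which this diff only imports and never shows).
 * The idea: each optional request feature is a small type of its own, a model's
 * meta object is checked with `as const satisfies` against the intersection of
 * the features it supports, and the per-model type maps reuse that same
 * intersection so provider options narrow per model name.
 */
// Assumed request fragments (names mirror the imports used throughout the patch):
type OllamaChatRequest = { model: string; stream?: boolean }
type OllamaChatRequestMessages = {
  messages?: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>
}
type OllamaChatRequestTools = { tools?: Array<unknown> }
type OllamaChatRequestThinking = { think?: boolean }
// Assumed meta shape: the generic parameter records the request surface the
// model accepts; it is deliberately phantom in this sketch.
type OllamaModelMeta<_TRequest extends OllamaChatRequest> = {
  name: string
  size: string
  context: number
}
// Usage, mirroring the constants in this patch: a tools-and-thinking model.
const SKETCH_MODEL = {
  name: 'sketch-model:7b', // hypothetical tag, for illustration only
  size: '4.7gb',
  context: 128_000,
} as const satisfies OllamaModelMeta<
  OllamaChatRequest &
    OllamaChatRequestMessages &
    OllamaChatRequestTools &
    OllamaChatRequestThinking
>
// And the matching per-model provider-options map entry; `as const` keeps
// SKETCH_MODEL.name a string literal, so it is usable as a computed key:
type SketchChatModelProviderOptionsByName = {
  [SKETCH_MODEL.name]: OllamaModelMeta<
    OllamaChatRequest &
      OllamaChatRequestMessages &
      OllamaChatRequestTools &
      OllamaChatRequestThinking
  >
}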
export const DEEPSEEK_R1_MODELS = [ DEEPSEEK_R1_LATEST.name, @@ -101,13 +143,48 @@ export const DEEPSEEK_R1_MODELS = [ // Manual type map for per-model provider options export type DeepseekR1ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [DEEPSEEK_R1_LATEST.name]: ChatRequest - [DEEPSEEK_R1_1_5b.name]: ChatRequest - [DEEPSEEK_R1_7b.name]: ChatRequest - [DEEPSEEK_R1_8b.name]: ChatRequest - [DEEPSEEK_R1_32b.name]: ChatRequest - [DEEPSEEK_R1_70b.name]: ChatRequest - [DEEPSEEK_R1_671b.name]: ChatRequest + [DEEPSEEK_R1_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [DEEPSEEK_R1_1_5b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [DEEPSEEK_R1_7b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [DEEPSEEK_R1_8b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [DEEPSEEK_R1_32b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [DEEPSEEK_R1_70b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [DEEPSEEK_R1_671b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > } export type DeepseekR1ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts index 413413ed..899a7d51 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts @@ -1,5 +1,11 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestThinking, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const DEEPSEEK_V3_1_LATEST = { name: 'deepseek-v3.1:latest', @@ -10,7 +16,12 @@ const DEEPSEEK_V3_1_LATEST = { }, size: '404gb', context: 160_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const DEEPSEEK_V3_1_671b = { name: 'deepseek-v3.1:671b', @@ -22,7 +33,12 @@ const DEEPSEEK_V3_1_671b = { size: '404gb', context: 160_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const DEEPSEEK_V3_1_671b_cloud = { name: 'deepseek-v3.1:671b-cloud', @@ -33,7 +49,12 @@ const DEEPSEEK_V3_1_671b_cloud = { }, size: '404gb', context: 160_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> export const DEEPSEEK_V3_1_MODELS = [ DEEPSEEK_V3_1_LATEST.name, @@ -54,9 +75,24 @@ export const DEEPSEEK_V3_1_MODELS = [ // Manual type map for per-model provider options export type
Deepseekv3_1ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [DEEPSEEK_V3_1_LATEST.name]: ChatRequest - [DEEPSEEK_V3_1_671b.name]: ChatRequest - [DEEPSEEK_V3_1_671b_cloud.name]: ChatRequest + [DEEPSEEK_V3_1_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [DEEPSEEK_V3_1_671b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [DEEPSEEK_V3_1_671b_cloud.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > } export type Deepseekv3_1ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts index 246729ca..2ec725d9 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const DEVSTRAL_LATEST = { name: 'devstral:latest', @@ -10,7 +15,11 @@ const DEVSTRAL_LATEST = { }, size: '14gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const DEVSTRAL_24b = { name: 'devstral:24b', @@ -21,7 +30,11 @@ const DEVSTRAL_24b = { }, size: '14gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const DEVSTRAL_MODELS = [ DEVSTRAL_LATEST.name, @@ -41,8 +54,16 @@ export const DEVSTRAL_MODELS = [ // Manual type map for per-model provider options export type DevstralChatModelProviderOptionsByName = { // Models with thinking and structured output support - [DEVSTRAL_LATEST.name]: ChatRequest - [DEVSTRAL_24b.name]: ChatRequest + [DEVSTRAL_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [DEVSTRAL_24b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type DevstralModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts index f45d44b4..a044a257 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const DOLPHIN3_LATEST = { name: 'dolphin3:latest', @@ -10,7 +13,9 @@ const DOLPHIN3_LATEST = { }, size: '4.9gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const DOLPHIN3_8b = { name: 'dolphin3:8b', @@ -21,7 +26,9 @@ const DOLPHIN3_8b = { }, size: '4.9gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const 
satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const DOLPHIN3_MODELS = [DOLPHIN3_LATEST.name, DOLPHIN3_8b.name] as const @@ -38,8 +45,12 @@ export const DOLPHIN3_MODELS = [DOLPHIN3_LATEST.name, DOLPHIN3_8b.name] as const // Manual type map for per-model provider options export type Dolphin3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [DOLPHIN3_LATEST.name]: ChatRequest - [DOLPHIN3_8b.name]: ChatRequest + [DOLPHIN3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [DOLPHIN3_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Dolphin3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts index 131f57c1..7a2b84e6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const EXAONE3_5_LATEST = { name: 'exaone3.5:latest', @@ -10,7 +13,9 @@ const EXAONE3_5_LATEST = { }, size: '4.8gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const EXAONE3_5_2_4b = { name: 'exaone3.5:2.4b', @@ -21,7 +26,9 @@ const EXAONE3_5_2_4b = { }, size: '1.6gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const EXAONE3_5_7_8b = { name: 'exaone3.5:7.8b', @@ -32,7 +39,9 @@ const EXAONE3_5_7_8b = { }, size: '4.8gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const EXAONE3_5_32b = { name: 'exaone3.5:32b', @@ -43,7 +52,9 @@ const EXAONE3_5_32b = { }, size: '19gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const EXAONE3_5MODELS = [ EXAONE3_5_LATEST.name, @@ -65,10 +76,18 @@ export const EXAONE3_5MODELS = [ // Manual type map for per-model provider options export type Exaone3_5ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [EXAONE3_5_LATEST.name]: ChatRequest - [EXAONE3_5_2_4b.name]: ChatRequest - [EXAONE3_5_7_8b.name]: ChatRequest - [EXAONE3_5_32b.name]: ChatRequest + [EXAONE3_5_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [EXAONE3_5_2_4b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [EXAONE3_5_7_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [EXAONE3_5_32b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Exaone3_5ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts index f353b2f4..8824d6fc 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type {
DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const FALCON2_LATEST = { name: 'falcon2:latest', @@ -10,7 +13,9 @@ const FALCON2_LATEST = { }, size: '6.4gb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const FALCON2_11b = { name: 'falcon2:11b', @@ -21,7 +26,9 @@ const FALCON2_11b = { }, size: '6.4gb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const FALCON2_MODELS = [FALCON2_LATEST.name, FALCON2_11b.name] as const @@ -38,8 +45,12 @@ export const FALCON2_MODELS = [FALCON2_LATEST.name, FALCON2_11b.name] as const // Manual type map for per-model provider options export type Falcon2ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [FALCON2_LATEST.name]: ChatRequest - [FALCON2_11b.name]: ChatRequest + [FALCON2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [FALCON2_11b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Falcon2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts index 50e15cee..d34dc4d5 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const FALCON3_LATEST = { name: 'falcon3:latest', @@ -10,7 +13,9 @@ const FALCON3_LATEST = { }, size: '4.6gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const FALCON3_1b = { name: 'falcon3:1b', @@ -21,7 +26,9 @@ const FALCON3_1b = { }, size: '1.8gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const FALCON3_3b = { name: 'falcon3:3b', @@ -32,7 +39,9 @@ const FALCON3_3b = { }, size: '2gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const FALCON3_7b = { name: 'falcon3:7b', @@ -43,7 +52,9 @@ const FALCON3_7b = { }, size: '4.6gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const FALCON3_10b = { name: 'falcon3:10b', @@ -54,7 +65,9 @@ const FALCON3_10b = { }, size: '6.3gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const FALCON3_MODELS = [ FALCON3_LATEST.name, @@ -77,11 +90,21 @@ export const FALCON3_MODELS = [ // Manual type map for per-model provider options export type Falcon3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [FALCON3_LATEST.name]: ChatRequest - [FALCON3_1b.name]: ChatRequest - [FALCON3_3b.name]: ChatRequest - [FALCON3_7b.name]: ChatRequest - [FALCON3_10b.name]: ChatRequest + 
[FALCON3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [FALCON3_1b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [FALCON3_3b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [FALCON3_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [FALCON3_10b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Falcon3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts index 517616a4..f15d31b4 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const FIREFUNCTION_V2_LATEST = { name: 'firefunction-v2:latest', @@ -10,7 +13,9 @@ const FIREFUNCTION_V2_LATEST = { }, size: '40gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const FIREFUNCTION_V2_70b = { name: 'firefunction-v2:70b', @@ -21,7 +26,9 @@ const FIREFUNCTION_V2_70b = { }, size: '40gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const FIREFUNCTION_V2_MODELS = [ FIREFUNCTION_V2_LATEST.name, @@ -41,8 +48,12 @@ export const FIREFUNCTION_V2_MODELS = [ // Manual type map for per-model provider options export type Firefunction_V2ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [FIREFUNCTION_V2_LATEST.name]: ChatRequest - [FIREFUNCTION_V2_70b.name]: ChatRequest + [FIREFUNCTION_V2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [FIREFUNCTION_V2_70b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Firefunction_V2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts index b1b66f02..1d3cd037 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const GEMMA_LATEST = { name: 'gemma:latest', @@ -10,7 +13,9 @@ const GEMMA_LATEST = { }, size: '5gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA_2b = { name: 'gemma:2b', @@ -21,7 +26,9 @@ const GEMMA_2b = { }, size: '1.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA_7b = { name: 'gemma:7b', @@ -32,7 +39,9 @@ const GEMMA_7b = { }, size: '5gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const GEMMA_MODELS 
= [ GEMMA_LATEST.name, @@ -53,9 +62,15 @@ export const GEMMA_MODELS = [ // Manual type map for per-model provider options export type GemmaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [GEMMA_LATEST.name]: ChatRequest - [GEMMA_2b.name]: ChatRequest - [GEMMA_7b.name]: ChatRequest + [GEMMA_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA_2b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type GemmaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts index b5b594a8..4b2f295a 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const GEMMA2_LATEST = { name: 'gemma2:latest', @@ -10,7 +13,9 @@ const GEMMA2_LATEST = { }, size: '5.4gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA2_2b = { name: 'gemma2:2b', @@ -21,7 +26,9 @@ const GEMMA2_2b = { }, size: '1.6gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA2_9b = { name: 'gemma2:9b', @@ -32,7 +39,9 @@ const GEMMA2_9b = { }, size: '5.4gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA2_27b = { name: 'gemma2:27b', @@ -43,7 +52,9 @@ const GEMMA2_27b = { }, size: '16gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const GEMMA2_MODELS = [ GEMMA2_LATEST.name, @@ -65,10 +76,18 @@ export const GEMMA2_MODELS = [ // Manual type map for per-model provider options export type Gemma2ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [GEMMA2_LATEST.name]: ChatRequest - [GEMMA2_2b.name]: ChatRequest - [GEMMA2_9b.name]: ChatRequest - [GEMMA2_27b.name]: ChatRequest + [GEMMA2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA2_2b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA2_9b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA2_27b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Gemma2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts index e10daf25..1eb1e59e 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts @@ -1,5 +1,9 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaMessageImages, + OllamaModelMeta, +} from './models-meta' const GEMMA3_LATEST = { name: 
'gemma3:latest', @@ -10,7 +14,9 @@ const GEMMA3_LATEST = { }, size: '3.3gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA3_270m = { name: 'gemma3:270m', @@ -21,7 +27,9 @@ const GEMMA3_270m = { }, size: '298mb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA3_1b = { name: 'gemma3:1b', @@ -32,7 +40,9 @@ const GEMMA3_1b = { }, size: '815mb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA3_4b = { name: 'gemma3:4b', @@ -43,7 +53,9 @@ const GEMMA3_4b = { }, size: '3.3gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA3_12b = { name: 'gemma3:12b', @@ -54,7 +66,9 @@ const GEMMA3_12b = { }, size: '8.1gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GEMMA3_27b = { name: 'gemma3:27b', @@ -65,7 +79,9 @@ const GEMMA3_27b = { }, size: '17gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const GEMMA3_MODELS = [ GEMMA3_LATEST.name, @@ -89,12 +105,24 @@ export const GEMMA3_MODELS = [ // Manual type map for per-model provider options export type Gemma3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [GEMMA3_LATEST.name]: ChatRequest - [GEMMA3_270m.name]: ChatRequest - [GEMMA3_1b.name]: ChatRequest - [GEMMA3_4b.name]: ChatRequest - [GEMMA3_12b.name]: ChatRequest - [GEMMA3_27b.name]: ChatRequest + [GEMMA3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA3_270m.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA3_1b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA3_4b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA3_12b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GEMMA3_27b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Gemma3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts index 6f28a433..545295bb 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const GRANITE3_DENSE_LATEST = { name: 'granite3-dense:latest', @@ -10,7 +13,9 @@ const GRANITE3_DENSE_LATEST = { }, size: '1.6gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GRANITE3_DENSE_2b = { name: 'granite3-dense:2b', @@ -21,7 +26,9 @@ const GRANITE3_DENSE_2b = { }, size: '1.6gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta 
+} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GRANITE3_DENSE_8b = { name: 'granite3-dense:8b', @@ -32,7 +39,9 @@ const GRANITE3_DENSE_8b = { }, size: '4.9gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const GRANITE3_DENSE_MODELS = [ GRANITE3_DENSE_LATEST.name, @@ -53,9 +62,15 @@ export const GRANITE3_DENSE_MODELS = [ // Manual type map for per-model provider options export type Granite3DenseChatModelProviderOptionsByName = { // Models with thinking and structured output support - [GRANITE3_DENSE_LATEST.name]: ChatRequest - [GRANITE3_DENSE_2b.name]: ChatRequest - [GRANITE3_DENSE_8b.name]: ChatRequest + [GRANITE3_DENSE_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GRANITE3_DENSE_2b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GRANITE3_DENSE_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Granite3DenseModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts index 798118cb..436939f1 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const GRANITE3_GUARDIAN_LATEST = { name: 'granite3-guardian:latest', @@ -10,7 +13,9 @@ const GRANITE3_GUARDIAN_LATEST = { }, size: '2.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GRANITE3_GUARDIAN_2b = { name: 'granite3-guardian:2b', @@ -21,7 +26,9 @@ const GRANITE3_GUARDIAN_2b = { }, size: '2.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const GRANITE3_GUARDIAN_8b = { name: 'granite3-guardian:8b', @@ -32,7 +39,9 @@ const GRANITE3_GUARDIAN_8b = { }, size: '5.8gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const GRANITE3_GUARDIAN_MODELS = [ GRANITE3_GUARDIAN_LATEST.name, @@ -53,9 +62,15 @@ export const GRANITE3_GUARDIAN_MODELS = [ // Manual type map for per-model provider options export type Granite3GuardianChatModelProviderOptionsByName = { // Models with thinking and structured output support - [GRANITE3_GUARDIAN_LATEST.name]: ChatRequest - [GRANITE3_GUARDIAN_2b.name]: ChatRequest - [GRANITE3_GUARDIAN_8b.name]: ChatRequest + [GRANITE3_GUARDIAN_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GRANITE3_GUARDIAN_2b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [GRANITE3_GUARDIAN_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Granite3GuardianModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts index 4d43bf2d..1eaa6ec1 100644 --- 
a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const GRANITE3_MOE_LATEST = { name: 'granite3-moe:latest', @@ -10,7 +15,11 @@ const GRANITE3_MOE_LATEST = { }, size: '822mb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const GRANITE3_MOE_1b = { name: 'granite3-moe:1b', @@ -21,7 +30,11 @@ const GRANITE3_MOE_1b = { }, size: '822mb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const GRANITE3_MOE_3b = { name: 'granite3-moe:3b', @@ -32,7 +45,11 @@ const GRANITE3_MOE_3b = { }, size: '2.1gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const GRANITE3_MOE_MODELS = [ GRANITE3_MOE_LATEST.name, @@ -53,9 +70,21 @@ export const GRANITE3_MOE_MODELS = [ // Manual type map for per-model provider options export type Granite3MoeChatModelProviderOptionsByName = { // Models with thinking and structured output support - [GRANITE3_MOE_LATEST.name]: ChatRequest - [GRANITE3_MOE_1b.name]: ChatRequest - [GRANITE3_MOE_3b.name]: ChatRequest + [GRANITE3_MOE_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [GRANITE3_MOE_1b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [GRANITE3_MOE_3b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Granite3MoeModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts index 2dbf7374..3cf4ce4c 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const GRANITE3_1_DENSE_LATEST = { name: 'granite3.1-dense:latest', @@ -10,7 +15,11 @@ const GRANITE3_1_DENSE_LATEST = { }, size: '5gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const GRANITE3_1_DENSE_2b = { name: 'granite3.1-dense:2b', @@ -21,7 +30,11 @@ const GRANITE3_1_DENSE_2b = { }, size: '1.6gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const GRANITE3_1_DENSE_8b = { name: 'granite3.1-dense:8b', @@ -32,7 +45,11 @@ const GRANITE3_1_DENSE_8b = { }, size: '5gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const GRANITE3_1_DENSE_MODELS = [ GRANITE3_1_DENSE_LATEST.name, @@ -53,9 +70,21 @@ export const GRANITE3_1_DENSE_MODELS = [ // Manual type map for per-model provider options export type Granite3_1DenseChatModelProviderOptionsByName = { // Models with thinking and structured output support - [GRANITE3_1_DENSE_LATEST.name]: ChatRequest - [GRANITE3_1_DENSE_2b.name]: ChatRequest - [GRANITE3_1_DENSE_8b.name]: ChatRequest + [GRANITE3_1_DENSE_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [GRANITE3_1_DENSE_2b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [GRANITE3_1_DENSE_8b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Granite3_1DenseModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts index 7d513967..e3b7792e 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const GRANITE3_1_MOE_LATEST = { name: 'granite3.1-moe:latest', @@ -10,7 +15,11 @@ const GRANITE3_1_MOE_LATEST = { }, size: '2gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const GRANITE3_1_MOE_1b = { name: 'granite3.1-moe:1b', @@ -21,7 +30,11 @@ const GRANITE3_1_MOE_1b = { }, size: '1.4gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const GRANITE3_1_MOE_3b = { name: 'granite3.1-moe:3b', @@ -32,7 +45,11 @@ const GRANITE3_1_MOE_3b = { }, size: '2gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const GRANITE3_1_MOE_MODELS = [ GRANITE3_1_MOE_LATEST.name, @@ -53,9 +70,21 @@ export const GRANITE3_1_MOE_MODELS = [ // Manual type map for per-model provider options export type Granite3_1MoeChatModelProviderOptionsByName = { // Models with thinking and structured output support - [GRANITE3_1_MOE_LATEST.name]: ChatRequest - [GRANITE3_1_MOE_1b.name]: ChatRequest - [GRANITE3_1_MOE_3b.name]: ChatRequest + [GRANITE3_1_MOE_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [GRANITE3_1_MOE_1b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [GRANITE3_1_MOE_3b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Granite3_1MoeModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts index
db18d06d..7f87af9f 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const LLAMA_GUARD3_LATEST = { name: 'llama-guard3:latest', @@ -10,7 +13,9 @@ const LLAMA_GUARD3_LATEST = { }, size: '4.9gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA_GUARD3_1b = { name: 'llama-guard3:1b', @@ -21,7 +26,9 @@ const LLAMA_GUARD3_1b = { }, size: '1.6gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA_GUARD3_8b = { name: 'llama-guard3:8b', @@ -32,7 +39,9 @@ const LLAMA_GUARD3_8b = { }, size: '4.9gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAMA_GUARD3_MODELS = [ LLAMA_GUARD3_LATEST.name, @@ -53,9 +62,15 @@ export const LLAMA_GUARD3_MODELS = [ // Manual type map for per-model provider options export type LlamaGuard3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA_GUARD3_LATEST.name]: ChatRequest - [LLAMA_GUARD3_1b.name]: ChatRequest - [LLAMA_GUARD3_8b.name]: ChatRequest + [LLAMA_GUARD3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA_GUARD3_1b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA_GUARD3_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type LlamaGuard3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts index 44a9c66d..27be0978 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const LLAMA2_LATEST = { name: 'llama2:latest', @@ -10,7 +13,9 @@ const LLAMA2_LATEST = { }, size: '3.8gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA2_7b = { name: 'llama2:7b', @@ -21,7 +26,9 @@ const LLAMA2_7b = { }, size: '3.8gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA2_13b = { name: 'llama2:13b', @@ -32,7 +39,9 @@ const LLAMA2_13b = { }, size: '7.4gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA2_70b = { name: 'llama2:70b', @@ -43,7 +52,9 @@ const LLAMA2_70b = { }, size: '39gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAMA2_MODELS = [ LLAMA2_LATEST.name, @@ -65,10 +76,18 @@ export const
LLAMA2_MODELS = [ // Manual type map for per-model provider options export type Llama2ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA2_LATEST.name]: ChatRequest - [LLAMA2_7b.name]: ChatRequest - [LLAMA2_13b.name]: ChatRequest - [LLAMA2_70b.name]: ChatRequest + [LLAMA2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA2_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA2_13b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA2_70b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Llama2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts index 58063a03..ddf088b6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const LLAMA3_CHATQA_LATEST = { name: 'llama3-chatqa:latest', @@ -10,7 +13,9 @@ const LLAMA3_CHATQA_LATEST = { }, size: '4.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA3_CHATQA_8b = { name: 'llama3-chatqa:8b', @@ -21,7 +26,9 @@ const LLAMA3_CHATQA_8b = { }, size: '4.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA3_CHATQA_70b = { name: 'llama3-chatqa:70b', @@ -32,7 +39,9 @@ const LLAMA3_CHATQA_70b = { }, size: '40gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAMA3_CHATQA_MODELS = [ LLAMA3_CHATQA_LATEST.name, @@ -53,9 +62,15 @@ export const LLAMA3_CHATQA_MODELS = [ // Manual type map for per-model provider options export type Llama3ChatQaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA3_CHATQA_LATEST.name]: ChatRequest - [LLAMA3_CHATQA_8b.name]: ChatRequest - [LLAMA3_CHATQA_70b.name]: ChatRequest + [LLAMA3_CHATQA_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA3_CHATQA_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA3_CHATQA_70b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Llama3ChatQaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts index 8a658e6f..3fb75963 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const LLAMA3_GRADIENT_LATEST = { name: 'llama3-gradient:latest', @@ -10,7 +13,9 @@ const LLAMA3_GRADIENT_LATEST = { }, size: '4.7gb', context: 1_000_000, -} as const satisfies
DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA3_GRADIENT_8b = { name: 'llama3-gradient:8b', @@ -21,7 +26,9 @@ const LLAMA3_GRADIENT_8b = { }, size: '4.7gb', context: 1_000_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA3_GRADIENT_70b = { name: 'llama3-gradient:70b', @@ -32,7 +39,9 @@ const LLAMA3_GRADIENT_70b = { }, size: '40gb', context: 1_000_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAMA3_GRADIENT_MODELS = [ LLAMA3_GRADIENT_LATEST.name, @@ -53,9 +62,15 @@ export const LLAMA3_GRADIENT_MODELS = [ // Manual type map for per-model provider options export type Llama3GradientChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA3_GRADIENT_LATEST.name]: ChatRequest - [LLAMA3_GRADIENT_8b.name]: ChatRequest - [LLAMA3_GRADIENT_70b.name]: ChatRequest + [LLAMA3_GRADIENT_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA3_GRADIENT_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA3_GRADIENT_70b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Llama3GradientModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts index 66186581..8696b509 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const LLAMA3_1_LATEST = { name: 'llama3.1:latest', @@ -10,7 +15,11 @@ const LLAMA3_1_LATEST = { }, size: '4.9b', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const LLAMA3_1_8b = { name: 'llama3.1:8b', @@ -21,7 +30,11 @@ const LLAMA3_1_8b = { }, size: '4.9gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const LLAMA3_1_70b = { name: 'llama3.1:70b', @@ -32,7 +45,11 @@ const LLAMA3_1_70b = { }, size: '43gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const LLAMA3_1_405b = { name: 'llama3.1:70b', @@ -43,7 +60,11 @@ const LLAMA3_1_405b = { }, size: '243gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const LLAMA3_1_MODELS = [ LLAMA3_1_LATEST.name, @@ -65,10 +86,26 @@ export const LLAMA3_1_MODELS = [ // Manual type map for per-model provider options export type Llama3_1ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA3_1_LATEST.name]: ChatRequest - [LLAMA3_1_8b.name]: ChatRequest - [LLAMA3_1_70b.name]: ChatRequest - [LLAMA3_1_405b.name]: ChatRequest +
[LLAMA3_1_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [LLAMA3_1_8b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [LLAMA3_1_70b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [LLAMA3_1_405b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Llama3_1ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts index d840815f..3a94aec9 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts @@ -1,5 +1,9 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaMessageImages, + OllamaModelMeta, +} from './models-meta' const LLAMA3_2_VISION_LATEST = { name: 'llama3.2:latest', @@ -10,7 +14,9 @@ const LLAMA3_2_VISION_LATEST = { }, size: '7.8b', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA3_2_VISION_11b = { name: 'llama3.2:11b', @@ -21,7 +27,9 @@ const LLAMA3_2_VISION_11b = { }, size: '1gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA3_2_VISION_90b = { name: 'llama3.2:90b', @@ -32,7 +40,9 @@ const LLAMA3_2_VISION_90b = { }, size: '55gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAMA3_2_VISION_MODELS = [ LLAMA3_2_VISION_LATEST.name, @@ -53,9 +63,15 @@ export const LLAMA3_2_VISION_MODELS = [ // Manual type map for per-model provider options export type Llama3_2VisionChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA3_2_VISION_LATEST.name]: ChatRequest - [LLAMA3_2_VISION_11b.name]: ChatRequest - [LLAMA3_2_VISION_90b.name]: ChatRequest + [LLAMA3_2_VISION_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA3_2_VISION_11b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA3_2_VISION_90b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Llama3_2VisionModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts index 328adcce..206c5965 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const LLAMA3_2_LATEST = { name: 'llama3.2:latest', @@ -10,7 +15,11 @@ const LLAMA3_2_LATEST = { }, size: '2b', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + 
OllamaChatRequestMessages & + OllamaChatRequestTools +> const LLAMA3_2_1b = { name: 'llama3.2:1b', @@ -21,7 +30,11 @@ const LLAMA3_2_1b = { }, size: '1.3gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const LLAMA3_2_3b = { name: 'llama3.2:3b', @@ -32,7 +45,11 @@ const LLAMA3_2_3b = { }, size: '2gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const LLAMA3_2_MODELS = [ LLAMA3_2_LATEST.name, @@ -53,9 +70,21 @@ export const LLAMA3_2_MODELS = [ // Manual type map for per-model provider options export type Llama3_2ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA3_2_LATEST.name]: ChatRequest - [LLAMA3_2_1b.name]: ChatRequest - [LLAMA3_2_3b.name]: ChatRequest + [LLAMA3_2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [LLAMA3_2_1b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [LLAMA3_2_3b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Llama3_2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts index 1cbc63a8..1ae18c80 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const LLAMA3_3_LATEST = { name: 'llama3.3:latest', @@ -10,7 +15,11 @@ const LLAMA3_3_LATEST = { }, size: '43b', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const LLAMA3_3_70b = { name: 'llama3.3:8b', @@ -21,7 +30,11 @@ const LLAMA3_3_70b = { }, size: '43gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const LLAMA3_3_MODELS = [ LLAMA3_3_LATEST.name, @@ -41,8 +54,16 @@ export const LLAMA3_3_MODELS = [ // Manual type map for per-model provider options export type Llama3_3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA3_3_LATEST.name]: ChatRequest - [LLAMA3_3_70b.name]: ChatRequest + [LLAMA3_3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [LLAMA3_3_70b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Llama3_3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts index d61504b9..79f54d69 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } 
from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const LLAMA3_LATEST = { name: 'llama3:latest', @@ -10,7 +13,9 @@ const LLAMA3_LATEST = { }, size: '4.7b', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA3_8b = { name: 'llama3:7b', @@ -21,7 +26,9 @@ const LLAMA3_8b = { }, size: '4.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAMA3_70b = { name: 'llama3:70b', @@ -32,7 +39,9 @@ const LLAMA3_70b = { }, size: '40gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAMA3_MODELS = [ LLAMA3_LATEST.name, @@ -53,9 +62,15 @@ export const LLAMA3_MODELS = [ // Manual type map for per-model provider options export type Llama3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA3_LATEST.name]: ChatRequest - [LLAMA3_8b.name]: ChatRequest - [LLAMA3_70b.name]: ChatRequest + [LLAMA3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA3_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAMA3_70b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Llama3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts index 418cc25d..1f816118 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts @@ -1,5 +1,12 @@ import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageImages, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const LLAMA4_LATEST = { name: 'llama4:latest', @@ -10,7 +17,11 @@ const LLAMA4_LATEST = { }, size: '67b', context: 10_000_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const LLAMA4_16X17b = { name: 'llama4:16x17b', @@ -21,7 +32,11 @@ const LLAMA4_16X17b = { }, size: '67gb', context: 10_000_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const LLAMA4_128X17b = { name: 'llama4:128x17b', @@ -32,7 +47,11 @@ const LLAMA4_128X17b = { }, size: '245gb', context: 1_000_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const LLAMA4_MODELS = [ LLAMA4_LATEST.name, @@ -53,9 +72,21 @@ export const LLAMA4_MODELS = [ // Manual type map for per-model provider options export type Llama3_4ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAMA4_LATEST.name]: ChatRequest - [LLAMA4_16X17b.name]: ChatRequest - [LLAMA4_128X17b.name]: ChatRequest + [LLAMA4_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & 
+ OllamaChatRequestTools + > + [LLAMA4_16X17b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [LLAMA4_128X17b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Llama3_4ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts index da96e112..f8a90719 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts @@ -1,5 +1,11 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageImages, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const LLAVA_LLAMA3_LATEST = { name: 'llava-llama3:latest', @@ -10,7 +16,9 @@ const LLAVA_LLAMA3_LATEST = { }, size: '5.5b', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAVA_LLAMA3_8b = { name: 'llava-llama3:8b', @@ -21,7 +29,9 @@ const LLAVA_LLAMA3_8b = { }, size: '5.5gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAVA_LLAMA3_MODELS = [ LLAVA_LLAMA3_LATEST.name, @@ -41,8 +51,16 @@ export const LLAVA_LLAMA3_MODELS = [ // Manual type map for per-model provider options export type LlavaLlamaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAVA_LLAMA3_LATEST.name]: ChatRequest - [LLAVA_LLAMA3_8b.name]: ChatRequest + [LLAVA_LLAMA3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [LLAVA_LLAMA3_8b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type LlavaLlamaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts index 4c725a64..be99034e 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts @@ -1,5 +1,9 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaMessageImages, + OllamaModelMeta, +} from './models-meta' const LLAVA_PHI3_LATEST = { name: 'llava-phi3:latest', @@ -10,7 +14,9 @@ const LLAVA_PHI3_LATEST = { }, size: '2.9b', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAVA_PHI3_8b = { name: 'llava-phi3:8b', @@ -21,7 +27,9 @@ const LLAVA_PHI3_8b = { }, size: '2.9gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAVA_PHI3_MODELS = [ LLAVA_PHI3_LATEST.name, @@ -41,8 +49,12 @@ export const LLAVA_PHI3_MODELS = [ // Manual type map for per-model provider options export type LlavaPhi3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAVA_PHI3_LATEST.name]: 
ChatRequest - [LLAVA_PHI3_8b.name]: ChatRequest + [LLAVA_PHI3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAVA_PHI3_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type LlavaPhi3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts index 18e7f762..371e7c3e 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts @@ -1,5 +1,9 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaMessageImages, + OllamaModelMeta, +} from './models-meta' const LLAVA_LATEST = { name: 'llava:latest', @@ -10,7 +14,9 @@ const LLAVA_LATEST = { }, size: '4.7b', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAVA_7b = { name: 'llava:7b', @@ -21,7 +27,9 @@ const LLAVA_7b = { }, size: '4.7gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAVA_13b = { name: 'llava:13b', @@ -32,7 +40,9 @@ const LLAVA_13b = { }, size: '8gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const LLAVA_34b = { name: 'llava:34b', @@ -43,7 +53,9 @@ const LLAVA_34b = { }, size: '20gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const LLAVA_MODELS = [ LLAVA_LATEST.name, @@ -65,10 +77,18 @@ export const LLAVA_MODELS = [ // Manual type map for per-model provider options export type llavaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [LLAVA_LATEST.name]: ChatRequest - [LLAVA_7b.name]: ChatRequest - [LLAVA_13b.name]: ChatRequest - [LLAVA_34b.name]: ChatRequest + [LLAVA_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAVA_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAVA_13b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [LLAVA_34b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type llavaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts index fb44d209..aaeb10a4 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const MARCO_O1_LATEST = { name: 'marco-o1:latest', @@ -10,7 +13,9 @@ const MARCO_O1_LATEST = { }, size: '4.7gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const MARCO_O1_7b = { name: 'marco-o1:7b', @@ -21,7 +26,9 @@ const MARCO_O1_7b = { }, size: '4.7gb', context: 32_000, -} as const 
satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const MARCO_O1_MODELS = [MARCO_O1_LATEST.name, MARCO_O1_7b.name] as const @@ -38,8 +45,12 @@ export const MARCO_O1_MODELS = [MARCO_O1_LATEST.name, MARCO_O1_7b.name] as const // Manual type map for per-model provider options export type MarcoO1ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [MARCO_O1_LATEST.name]: ChatRequest - [MARCO_O1_7b.name]: ChatRequest + [MARCO_O1_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [MARCO_O1_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type MarcoO1ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts index 7f2055f2..4c9d825c 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const MISTRAL_LARGE_LATEST = { name: 'mistral-large:latest', @@ -10,7 +15,11 @@ const MISTRAL_LARGE_LATEST = { }, size: '73gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const MISTRAL_LARGE_123b = { name: 'mistral-large:123b', @@ -21,7 +30,11 @@ const MISTRAL_LARGE_123b = { }, size: '73gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const MISTRAL_LARGE_MODELS = [ MISTRAL_LARGE_LATEST.name, @@ -41,8 +54,16 @@ export const MISTRAL_LARGE_MODELS = [ // Manual type map for per-model provider options export type MistralLargeChatModelProviderOptionsByName = { // Models with thinking and structured output support - [MISTRAL_LARGE_LATEST.name]: ChatRequest - [MISTRAL_LARGE_123b.name]: ChatRequest + [MISTRAL_LARGE_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [MISTRAL_LARGE_123b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type MistralLargeModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts index 39fb3ab6..c1d3c4bd 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const MISTRAL_NEMO_LATEST = { name: 'mistral-nemo:latest', @@ -10,7 +15,11 @@ const MISTRAL_NEMO_LATEST = { }, size: '7.1gb', context: 1_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + 
OllamaChatRequestTools +> const MISTRAL_NEMO_12b = { name: 'mistral-nemo:12b', @@ -21,7 +30,11 @@ const MISTRAL_NEMO_12b = { }, size: '7.1gb', context: 1_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const MISTRAL_NEMO_MODELS = [ MISTRAL_NEMO_LATEST.name, @@ -41,8 +54,16 @@ export const MISTRAL_NEMO_MODELS = [ // Manual type map for per-model provider options export type MistralNemoChatModelProviderOptionsByName = { // Models with thinking and structured output support - [MISTRAL_NEMO_LATEST.name]: ChatRequest - [MISTRAL_NEMO_12b.name]: ChatRequest + [MISTRAL_NEMO_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [MISTRAL_NEMO_12b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type MistralNemoModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts index 3dabd7d2..2c48efb6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const MISTRAL_SMALL_LATEST = { name: 'mistral-small:latest', @@ -10,7 +15,11 @@ const MISTRAL_SMALL_LATEST = { }, size: '14gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const MISTRAL_SMALL_22b = { name: 'mistral-small:12b', @@ -21,7 +30,11 @@ const MISTRAL_SMALL_22b = { }, size: '13gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const MISTRAL_SMALL_24b = { name: 'mistral-small:12b', @@ -32,7 +45,11 @@ const MISTRAL_SMALL_24b = { }, size: '13gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const MISTRAL_SMALL_MODELS = [ MISTRAL_SMALL_LATEST.name, @@ -53,9 +70,21 @@ export const MISTRAL_SMALL_MODELS = [ // Manual type map for per-model provider options export type MistralSmallChatModelProviderOptionsByName = { // Models with thinking and structured output support - [MISTRAL_SMALL_LATEST.name]: ChatRequest - [MISTRAL_SMALL_22b.name]: ChatRequest - [MISTRAL_SMALL_24b.name]: ChatRequest + [MISTRAL_SMALL_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [MISTRAL_SMALL_22b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [MISTRAL_SMALL_24b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type MistralSmallModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts index 55efb14d..51b7c27a 100644 --- 
a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts @@ -1,5 +1,9 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaMessageImages, + OllamaModelMeta, +} from './models-meta' const MISTRAL_LATEST = { name: 'mistral:latest', @@ -10,7 +14,9 @@ const MISTRAL_LATEST = { }, size: '2.9gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const MISTRAL_7b = { name: 'mistral:87', @@ -21,7 +27,9 @@ const MISTRAL_7b = { }, size: '2.9gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const MISTRAL_MODELS = [MISTRAL_LATEST.name, MISTRAL_7b.name] as const @@ -38,8 +46,12 @@ export const MISTRAL_MODELS = [MISTRAL_LATEST.name, MISTRAL_7b.name] as const // Manual type map for per-model provider options export type MistralChatModelProviderOptionsByName = { // Models with thinking and structured output support - [MISTRAL_LATEST.name]: ChatRequest - [MISTRAL_7b.name]: ChatRequest + [MISTRAL_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [MISTRAL_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type MistralModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts index 37656cd2..d1df33cb 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const MIXTRAL_LATEST = { name: 'mixtral:latest', @@ -10,7 +15,11 @@ const MIXTRAL_LATEST = { }, size: '26gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const MIXTRAL_8X7b = { name: 'mixtral:8x7b', @@ -21,7 +30,11 @@ const MIXTRAL_8X7b = { }, size: '26gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const MIXTRAL_8X22b = { name: 'mixtral:8x22b', @@ -32,7 +45,11 @@ const MIXTRAL_8X22b = { }, size: '80gb', context: 64_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const MIXTRAL_MODELS = [ MIXTRAL_LATEST.name, @@ -53,9 +70,21 @@ export const MIXTRAL_MODELS = [ // Manual type map for per-model provider options export type MixtralChatModelProviderOptionsByName = { // Models with thinking and structured output support - [MIXTRAL_LATEST.name]: ChatRequest - [MIXTRAL_8X7b.name]: ChatRequest - [MIXTRAL_8X22b.name]: ChatRequest + [MIXTRAL_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [MIXTRAL_8X7b.name]: OllamaModelMeta< + OllamaChatRequest & + 
OllamaChatRequestMessages & + OllamaChatRequestTools + > + [MIXTRAL_8X22b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type MixtralModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts index 50be72ad..3b99aedb 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts @@ -1,5 +1,9 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaMessageImages, + OllamaModelMeta, +} from './models-meta' const MOONDREAM_LATEST = { name: 'moondream:latest', @@ -10,7 +14,9 @@ const MOONDREAM_LATEST = { }, size: '1.7gb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const MOONDREAM_1_8b = { name: 'moondream:1.8b', @@ -21,7 +27,9 @@ const MOONDREAM_1_8b = { }, size: '1.7gb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const MOONDREAM_MODELS = [ MOONDREAM_LATEST.name, @@ -41,8 +49,12 @@ export const MOONDREAM_MODELS = [ // Manual type map for per-model provider options export type MoondreamChatModelProviderOptionsByName = { // Models with thinking and structured output support - [MOONDREAM_LATEST.name]: ChatRequest - [MOONDREAM_1_8b.name]: ChatRequest + [MOONDREAM_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [MOONDREAM_1_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type MoondreamModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts index b5a9cc2d..6cda8fb4 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const NEMOTRON_MINI_LATEST = { name: 'nemotron-mini:latest', @@ -10,7 +15,11 @@ const NEMOTRON_MINI_LATEST = { }, size: '2.7gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const NEMOTRON_MINI_4b = { name: 'nemotron-mini:4b', @@ -21,7 +30,11 @@ const NEMOTRON_MINI_4b = { }, size: '2.7gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const NEMOTRON_MINI_MODELS = [ NEMOTRON_MINI_LATEST.name, @@ -41,8 +54,16 @@ export const NEMOTRON_MINI_MODELS = [ // Manual type map for per-model provider options export type NemotronMiniChatModelProviderOptionsByName = { // Models with thinking and structured output support - [NEMOTRON_MINI_LATEST.name]: ChatRequest - [NEMOTRON_MINI_4b.name]: ChatRequest + [NEMOTRON_MINI_LATEST.name]: OllamaModelMeta< + 
OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [NEMOTRON_MINI_4b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type NemotronMiniModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts index 3f06d9ea..58d3704e 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const NEMOTRON_LATEST = { name: 'nemotron:latest', @@ -10,7 +15,11 @@ const NEMOTRON_LATEST = { }, size: '43gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const NEMOTRON_70b = { name: 'nemotron:70b', @@ -21,7 +30,11 @@ const NEMOTRON_70b = { }, size: '43gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const NEMOTRON_MODELS = [ NEMOTRON_LATEST.name, @@ -41,8 +54,16 @@ export const NEMOTRON_MODELS = [ // Manual type map for per-model provider options export type NemotronChatModelProviderOptionsByName = { // Models with thinking and structured output support - [NEMOTRON_LATEST.name]: ChatRequest - [NEMOTRON_70b.name]: ChatRequest + [NEMOTRON_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [NEMOTRON_70b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type NemotronModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts index 621bc7b8..ab2794fd 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const OLMO2_LATEST = { name: 'olmo2:latest', @@ -10,7 +13,9 @@ const OLMO2_LATEST = { }, size: '4.5gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const OLMO2_7b = { name: 'olmo2:7b', @@ -21,7 +26,9 @@ const OLMO2_7b = { }, size: '4.5gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const OLMO2_13b = { name: 'olmo2:13b', @@ -32,7 +39,9 @@ const OLMO2_13b = { }, size: '8.4gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const OLMO2_MODELS = [ OLMO2_LATEST.name, @@ -53,9 +62,15 @@ export const OLMO2_MODELS = [ // Manual type map for per-model provider options export type Olmo2ChatModelProviderOptionsByName = { // Models with thinking 
and structured output support - [OLMO2_LATEST.name]: ChatRequest - [OLMO2_7b.name]: ChatRequest - [OLMO2_13b.name]: ChatRequest + [OLMO2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [OLMO2_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [OLMO2_13b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Olmo2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts index 0b22d3b2..114f2046 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const OPENCODER_LATEST = { name: 'opencoder:latest', @@ -10,7 +13,9 @@ const OPENCODER_LATEST = { }, size: '4.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const OPENCODER_1_5b = { name: 'opencoder:1.5b', @@ -21,7 +26,9 @@ const OPENCODER_1_5b = { }, size: '1.4gb', context: 4_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const OPENCODER_8b = { name: 'opencoder:8b', @@ -32,7 +39,9 @@ const OPENCODER_8b = { }, size: '4.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const OPENCODER_MODELS = [ OPENCODER_LATEST.name, @@ -53,9 +62,15 @@ export const OPENCODER_MODELS = [ // Manual type map for per-model provider options export type OpencoderChatModelProviderOptionsByName = { // Models with thinking and structured output support - [OPENCODER_LATEST.name]: ChatRequest - [OPENCODER_1_5b.name]: ChatRequest - [OPENCODER_8b.name]: ChatRequest + [OPENCODER_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [OPENCODER_1_5b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [OPENCODER_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type OpencoderModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts index 459baeca..7c99a506 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const OPENHERMES_LATEST = { name: 'openhermes:latest', @@ -10,7 +13,9 @@ const OPENHERMES_LATEST = { }, size: '4.1gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const OPENHERMES_V2 = { name: 'openhermes:v2', @@ -21,7 +26,9 @@ const OPENHERMES_V2 = { }, size: '4.1gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & 
OllamaChatRequestMessages +> const OPENHERMES_V2_5 = { name: 'openhermes:v2.5', @@ -32,7 +39,9 @@ const OPENHERMES_V2_5 = { }, size: '4.1gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const OPENHERMES_MODELS = [ OPENHERMES_LATEST.name, @@ -53,9 +62,15 @@ export const OPENHERMES_MODELS = [ // Manual type map for per-model provider options export type OpenhermesChatModelProviderOptionsByName = { // Models with thinking and structured output support - [OPENHERMES_LATEST.name]: ChatRequest - [OPENHERMES_V2.name]: ChatRequest - [OPENHERMES_V2_5.name]: ChatRequest + [OPENHERMES_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [OPENHERMES_V2.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [OPENHERMES_V2_5.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type OpenhermesModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts index 7836b653..c0660a12 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const PHI3_LATEST = { name: 'phi3:latest', @@ -10,7 +13,9 @@ const PHI3_LATEST = { }, size: '2.2gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const PHI3_3_8b = { name: 'phi3:8b', @@ -21,7 +26,9 @@ const PHI3_3_8b = { }, size: '2.2gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const PHI3_14b = { name: 'phi3:14b', @@ -32,7 +39,9 @@ const PHI3_14b = { }, size: '7.9gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const PHI3_MODELS = [ PHI3_LATEST.name, @@ -53,9 +62,15 @@ export const PHI3_MODELS = [ // Manual type map for per-model provider options export type Phi3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [PHI3_LATEST.name]: ChatRequest - [PHI3_3_8b.name]: ChatRequest - [PHI3_14b.name]: ChatRequest + [PHI3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [PHI3_3_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [PHI3_14b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Phi3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts index 38ffeb9b..b2dc3f7b 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const PHI4_LATEST = { name: 'phi4:latest', @@ -10,7 +13,9 @@ const PHI4_LATEST = { }, 
size: '9.1gb', context: 16_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const PHI4_14b = { name: 'phi4:14b', @@ -21,7 +26,9 @@ const PHI4_14b = { }, size: '9.1gb', context: 16_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const PHI4_MODELS = [PHI4_LATEST.name, PHI4_14b.name] as const @@ -38,8 +45,12 @@ export const PHI4_MODELS = [PHI4_LATEST.name, PHI4_14b.name] as const // Manual type map for per-model provider options export type Phi4ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [PHI4_LATEST.name]: ChatRequest - [PHI4_14b.name]: ChatRequest + [PHI4_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [PHI4_14b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Phi4ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts index fee586de..41d9ac3b 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const QWEN_LATEST = { name: 'qwen:latest', @@ -10,7 +13,9 @@ const QWEN_LATEST = { }, size: '2.3gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const QWEN_0_5b = { name: 'qwen:0.5b', @@ -21,7 +26,9 @@ const QWEN_0_5b = { }, size: '395mb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const QWEN_1_8b = { name: 'qwen:1.8b', @@ -32,7 +39,9 @@ const QWEN_1_8b = { }, size: '1.1gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const QWEN_4b = { name: 'qwen:4b', @@ -43,7 +52,9 @@ const QWEN_4b = { }, size: '2.3gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const QWEN_7b = { name: 'qwen:7b', @@ -54,7 +65,9 @@ const QWEN_7b = { }, size: '4.5gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const QWEN_14b = { name: 'qwen:14b', @@ -65,7 +78,9 @@ const QWEN_14b = { }, size: '8.2gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const QWEN_32b = { name: 'qwen:32b', @@ -76,7 +91,9 @@ const QWEN_32b = { }, size: '18gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const QWEN_72b = { name: 'qwen:72b', @@ -87,7 +104,9 @@ const QWEN_72b = { }, size: '41gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const QWEN_110b = { name: 'qwen:110b', @@ -98,7 +117,9 @@ 
const QWEN_110b = { }, size: '63gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const QWEN_MODELS = [ QWEN_LATEST.name, @@ -125,15 +146,33 @@ export const QWEN_MODELS = [ // Manual type map for per-model provider options export type QwenChatModelProviderOptionsByName = { // Models with thinking and structured output support - [QWEN_LATEST.name]: ChatRequest - [QWEN_0_5b.name]: ChatRequest - [QWEN_1_8b.name]: ChatRequest - [QWEN_4b.name]: ChatRequest - [QWEN_7b.name]: ChatRequest - [QWEN_14b.name]: ChatRequest - [QWEN_32b.name]: ChatRequest - [QWEN_72b.name]: ChatRequest - [QWEN_110b.name]: ChatRequest + [QWEN_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [QWEN_0_5b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [QWEN_1_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [QWEN_4b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [QWEN_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [QWEN_14b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [QWEN_32b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [QWEN_72b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [QWEN_110b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type QwenModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts index a033db9c..c794797f 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const QWEN2_5_CODER_LATEST = { name: 'qwen2.5-coder:latest', @@ -10,7 +15,11 @@ const QWEN2_5_CODER_LATEST = { }, size: '4.7gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_CODER_0_5b = { name: 'qwen2.5-coder:0.5b', @@ -21,7 +30,11 @@ const QWEN2_5_CODER_0_5b = { }, size: '398mb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_CODER_1_5b = { name: 'qwen2.5-coder:1.5b', @@ -32,7 +45,11 @@ const QWEN2_5_CODER_1_5b = { }, size: '986mb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_CODER_3b = { name: 'qwen2.5-coder:3b', @@ -43,7 +60,11 @@ const QWEN2_5_CODER_3b = { }, size: '1.9gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_CODER_7b = { name: 'qwen2.5-coder:7b', @@ -54,7 +75,11 @@ const QWEN2_5_CODER_7b = { }, size: '4.7gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages &
+ OllamaChatRequestTools +> const QWEN2_5_CODER_14b = { name: 'qwen2.5-coder:14b', @@ -65,7 +90,11 @@ const QWEN2_5_CODER_14b = { }, size: '9gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_CODER_32b = { name: 'qwen2.5-coder:32b', @@ -76,7 +105,11 @@ const QWEN2_5_CODER_32b = { }, size: '20gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const QWEN2_5_CODER_MODELS = [ QWEN2_5_CODER_LATEST.name, @@ -100,13 +133,41 @@ export const QWEN2_5_CODER_MODELS = [ // Manual type map for per-model provider options export type Qwen2_5CoderChatModelProviderOptionsByName = { // Models with thinking and structured output support - [QWEN2_5_CODER_LATEST.name]: ChatRequest - [QWEN2_5_CODER_0_5b.name]: ChatRequest - [QWEN2_5_CODER_1_5b.name]: ChatRequest - [QWEN2_5_CODER_3b.name]: ChatRequest - [QWEN2_5_CODER_7b.name]: ChatRequest - [QWEN2_5_CODER_14b.name]: ChatRequest - [QWEN2_5_CODER_32b.name]: ChatRequest + [QWEN2_5_CODER_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_CODER_0_5b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_CODER_1_5b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_CODER_3b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_CODER_7b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_CODER_14b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_CODER_32b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Qwen2_5CoderModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts index 4827e758..0fbc6e9b 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const QWEN2_5_LATEST = { name: 'qwen2.5:latest', @@ -10,7 +15,11 @@ const QWEN2_5_LATEST = { }, size: '4.7gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_0_5b = { name: 'qwen2.5:0.5b', @@ -21,7 +30,11 @@ const QWEN2_5_0_5b = { }, size: '398mb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_1_5b = { name: 'qwen2.5:1.5b', @@ -32,7 +45,11 @@ const QWEN2_5_1_5b = { }, size: '986mb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages 
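These per-model maps are the consumer-facing surface: indexing by a model tag yields exactly the capability intersection that model was declared with. A type-level usage sketch (the map type is real in this patch; the lookup and the comment on the result are illustrative):

// 'qwen2.5-coder:7b' is declared with tool support, so its entry carries
// OllamaChatRequestTools in the intersection.
type CoderOpts =
  Qwen2_5CoderChatModelProviderOptionsByName['qwen2.5-coder:7b']
// resolves to OllamaModelMeta<
//   OllamaChatRequest & OllamaChatRequestMessages & OllamaChatRequestTools
// >

One detail worth a second look: the maps now resolve to `OllamaModelMeta<...>` rather than to the request intersection itself (the pre-patch values were plain `ChatRequest`), so a caller wanting just the option shape has to unwrap the meta type; if these maps are meant to feed provider options directly, exposing the bare intersection may be the simpler contract.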
& + OllamaChatRequestTools +> const QWEN2_5_3b = { name: 'qwen2.5:3b', @@ -43,7 +60,11 @@ const QWEN2_5_3b = { }, size: '1.9gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_7b = { name: 'qwen2.5:7b', @@ -54,7 +75,11 @@ const QWEN2_5_7b = { }, size: '4.7gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_32b = { name: 'qwen2.5:32b', @@ -65,7 +90,11 @@ const QWEN2_5_32b = { }, size: '20gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_5_72b = { name: 'qwen2.5:72b', @@ -76,7 +105,11 @@ const QWEN2_5_72b = { }, size: '47gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const QWEN2_5_MODELS = [ QWEN2_5_LATEST.name, @@ -101,13 +134,41 @@ export const QWEN2_5_MODELS = [ // Manual type map for per-model provider options export type Qwen2_5ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [QWEN2_5_LATEST.name]: ChatRequest - [QWEN2_5_0_5b.name]: ChatRequest - [QWEN2_5_1_5b.name]: ChatRequest - [QWEN2_5_3b.name]: ChatRequest - [QWEN2_5_7b.name]: ChatRequest - [QWEN2_5_32b.name]: ChatRequest - [QWEN2_5_72b.name]: ChatRequest + [QWEN2_5_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_0_5b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_1_5b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_3b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_7b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_32b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWEN2_5_72b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Qwen2_5ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts index 87e42e14..59554e65 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const QWEN2_LATEST = { name: 'qwen2:latest', @@ -10,7 +15,11 @@ const QWEN2_LATEST = { }, size: '4.4gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_0_5b = { name: 'qwen2:0.5b', @@ -21,7 +30,11 @@ const QWEN2_0_5b = { }, size: '352mb', context: 32_000, -} as const satisfies 
DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_1_5b = { name: 'qwen2:1.5b', @@ -32,7 +45,11 @@ const QWEN2_1_5b = { }, size: '935mb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_7b = { name: 'qwen2:7b', @@ -43,7 +60,11 @@ const QWEN2_7b = { }, size: '4.4gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWEN2_72b = { name: 'qwen2:72b', @@ -54,7 +75,11 @@ const QWEN2_72b = { }, size: '41gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const QWEN2_MODELS = [ QWEN2_LATEST.name, @@ -77,11 +102,35 @@ export const QWEN2_MODELS = [ // Manual type map for per-model provider options export type Qwen2ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [QWEN2_LATEST.name]: ChatRequest - [QWEN2_0_5b.name]: ChatRequest - [QWEN2_1_5b.name]: ChatRequest - [QWEN2_7b.name]: ChatRequest - [QWEN2_72b.name]: ChatRequest + [QWEN2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + + [QWEN2_0_5b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + + [QWEN2_1_5b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + + [QWEN2_7b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + + [QWEN2_72b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type Qwen2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts index b3bcbe99..e1cd4a57 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts @@ -1,5 +1,12 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestThinking, + OllamaChatRequestTools, + OllamaMessageThinking, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const QWEN3_LATEST = { name: 'qwen3:latest', @@ -10,7 +17,12 @@ const QWEN3_LATEST = { }, size: '5.2gb', context: 40_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const QWEN3_0_6b = { name: 'qwen3:0.6b', @@ -21,7 +33,12 @@ const QWEN3_0_6b = { }, size: '523mb', context: 40_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const QWEN3_1_7b = { name: 'qwen3:1.7b', @@ -32,7 +49,12 @@ const QWEN3_1_7b = { }, size: '1.4gb', context: 40_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + 
OllamaChatRequestTools & + OllamaChatRequestThinking +> const QWEN3_4b = { name: 'qwen3:4b', @@ -43,7 +65,12 @@ const QWEN3_4b = { }, size: '2.5gb', context: 256_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const QWEN3_8b = { name: 'qwen3:8b', @@ -54,7 +81,12 @@ const QWEN3_8b = { }, size: '5.2gb', context: 40_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const QWEN3_14b = { name: 'qwen3:14b', @@ -65,7 +97,12 @@ const QWEN3_14b = { }, size: '9.3gb', context: 40_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const QWEN3_30b = { name: 'qwen3:30b', @@ -76,7 +113,12 @@ const QWEN3_30b = { }, size: '19gb', context: 256_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const QWEN3_32b = { name: 'qwen3:32b', @@ -87,7 +129,12 @@ const QWEN3_32b = { }, size: '20gb', context: 40_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> const QWEN3_235b = { name: 'qwen3:235b', @@ -98,7 +145,12 @@ const QWEN3_235b = { }, size: '142gb', context: 256_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking +> export const QWEN3_MODELS = [ QWEN3_LATEST.name, @@ -125,15 +177,60 @@ export const QWEN3_MODELS = [ // Manual type map for per-model provider options export type Qwen3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [QWEN3_LATEST.name]: ChatRequest - [QWEN3_0_6b.name]: ChatRequest - [QWEN3_1_7b.name]: ChatRequest - [QWEN3_4b.name]: ChatRequest - [QWEN3_8b.name]: ChatRequest - [QWEN3_14b.name]: ChatRequest - [QWEN3_30b.name]: ChatRequest - [QWEN3_32b.name]: ChatRequest - [QWEN3_235b.name]: ChatRequest + [QWEN3_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [QWEN3_0_6b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [QWEN3_1_7b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [QWEN3_4b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [QWEN3_8b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [QWEN3_14b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [QWEN3_30b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [QWEN3_32b.name]: OllamaModelMeta< + OllamaChatRequest & + 
OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > + [QWEN3_235b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools & + OllamaChatRequestThinking + > } export type Qwen3ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts index 41738f15..cd2519aa 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts @@ -1,5 +1,10 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaChatRequestTools, + OllamaMessageTools, + OllamaModelMeta, +} from './models-meta' const QWQ_LATEST = { name: 'qwq:latest', @@ -10,7 +15,11 @@ const QWQ_LATEST = { }, size: '20gb', context: 40_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> const QWQ_32b = { name: 'qwq:32b', @@ -21,7 +30,11 @@ const QWQ_32b = { }, size: '20gb', context: 40_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools +> export const QWQ_MODELS = [QWQ_LATEST.name, QWQ_32b.name] as const @@ -38,8 +51,16 @@ export const QWQ_MODELS = [QWQ_LATEST.name, QWQ_32b.name] as const // Manual type map for per-model provider options export type QwqChatModelProviderOptionsByName = { // Models with thinking and structured output support - [QWQ_LATEST.name]: ChatRequest - [QWQ_32b.name]: ChatRequest + [QWQ_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > + [QWQ_32b.name]: OllamaModelMeta< + OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + > } export type QwqModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts index 9a6ae9f6..12a948b6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const SAILOR2_LATEST = { name: 'sailor2:latest', @@ -10,7 +13,9 @@ const SAILOR2_LATEST = { }, size: '5.2gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SAILOR2_1b = { name: 'sailor2:1b', @@ -21,7 +26,9 @@ const SAILOR2_1b = { }, size: '1.1gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SAILOR2_8b = { name: 'sailor2:8b', @@ -32,8 +39,9 @@ const SAILOR2_8b = { }, size: '5.2gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta - +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SAILOR2_20b = { name: 'sailor2:20b', supports: { @@ -43,7 +51,9 @@ const SAILOR2_20b = { }, size: '12gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies 
OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const SAILOR2_MODELS = [ SAILOR2_LATEST.name, @@ -64,10 +74,18 @@ export const SAILOR2_MODELS = [ // Manual type map for per-model provider options export type Sailor2ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [SAILOR2_LATEST.name]: ChatRequest - [SAILOR2_1b.name]: ChatRequest - [SAILOR2_8b.name]: ChatRequest - [SAILOR2_20b.name]: ChatRequest + [SAILOR2_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SAILOR2_1b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SAILOR2_8b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SAILOR2_20b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type Sailor2ModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts index 62fa1e6f..7a123bbb 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const SHIELDGEMMA_LATEST = { name: 'shieldgemma:latest', @@ -10,7 +13,9 @@ const SHIELDGEMMA_LATEST = { }, size: '5.8gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SHIELDGEMMA_2b = { name: 'shieldgemma:2b', @@ -21,7 +26,9 @@ const SHIELDGEMMA_2b = { }, size: '1.7gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SHIELDGEMMA_9b = { name: 'shieldgemma:9b', @@ -32,7 +39,9 @@ const SHIELDGEMMA_9b = { }, size: '5.8gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SHIELDGEMMA_27b = { name: 'shieldgemma:27b', @@ -43,7 +52,9 @@ const SHIELDGEMMA_27b = { }, size: '17gb', context: 8_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const SHIELDGEMMA_MODELS = [ SHIELDGEMMA_LATEST.name, @@ -65,10 +76,18 @@ export const SHIELDGEMMA_MODELS = [ // Manual type map for per-model provider options export type ShieldgemmaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [SHIELDGEMMA_LATEST.name]: ChatRequest - [SHIELDGEMMA_2b.name]: ChatRequest - [SHIELDGEMMA_9b.name]: ChatRequest - [SHIELDGEMMA_27b.name]: ChatRequest + [SHIELDGEMMA_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SHIELDGEMMA_2b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SHIELDGEMMA_9b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SHIELDGEMMA_27b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type ShieldgemmaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts index eafdeb8e..e16cab2b 100644 --- 
a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const SMALLTINKER_LATEST = { name: 'smalltinker:latest', @@ -10,7 +13,9 @@ const SMALLTINKER_LATEST = { }, size: '3.6gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SMALLTINKER_3b = { name: 'smalltinker:3b', @@ -21,7 +26,9 @@ const SMALLTINKER_3b = { }, size: '3.6gb', context: 32_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const SMALLTINKER_MODELS = [ SMALLTINKER_LATEST.name, @@ -41,8 +48,12 @@ export const SMALLTINKER_MODELS = [ // Manual type map for per-model provider options export type SmalltinkerChatModelProviderOptionsByName = { // Models with thinking and structured output support - [SMALLTINKER_LATEST.name]: ChatRequest - [SMALLTINKER_3b.name]: ChatRequest + [SMALLTINKER_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SMALLTINKER_3b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type SmalltinkerModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts index 79ebc939..09928c11 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const SMOLLM_LATEST = { name: 'smollm:latest', @@ -10,7 +13,9 @@ const SMOLLM_LATEST = { }, size: '991mb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SMOLLM_135m = { name: 'smollm:135m', @@ -21,7 +26,9 @@ const SMOLLM_135m = { }, size: '92mb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SMOLLM_360m = { name: 'smollm:360m', @@ -32,7 +39,9 @@ const SMOLLM_360m = { }, size: '229mb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const SMOLLM_1_7b = { name: 'smollm:1.7b', @@ -43,7 +52,9 @@ const SMOLLM_1_7b = { }, size: '991mb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const SMOLLM_MODELS = [ SMOLLM_LATEST.name, @@ -65,10 +76,18 @@ export const SMOLLM_MODELS = [ // Manual type map for per-model provider options export type SmollmChatModelProviderOptionsByName = { // Models with thinking and structured output support - [SMOLLM_LATEST.name]: ChatRequest - [SMOLLM_135m.name]: ChatRequest - [SMOLLM_360m.name]: ChatRequest - [SMOLLM_1_7b.name]: ChatRequest + [SMOLLM_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + 
[SMOLLM_135m.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SMOLLM_360m.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [SMOLLM_1_7b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type SmollmModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts index a4b0e110..62f9efa6 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const TINNYLLAMA_LATEST = { name: 'tinnyllama:latest', @@ -10,7 +13,9 @@ const TINNYLLAMA_LATEST = { }, size: '638mb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const TINNYLLAMA_1_1b = { name: 'tinnyllama:1.1b', @@ -21,7 +26,9 @@ const TINNYLLAMA_1_1b = { }, size: '638mb', context: 2_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const TINNYLLAMA_MODELS = [ TINNYLLAMA_LATEST.name, @@ -41,8 +48,12 @@ export const TINNYLLAMA_MODELS = [ // Manual type map for per-model provider options export type TinnyllamaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [TINNYLLAMA_LATEST.name]: ChatRequest - [TINNYLLAMA_1_1b.name]: ChatRequest + [TINNYLLAMA_LATEST.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > + [TINNYLLAMA_1_1b.name]: OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages + > } export type TinnyllamaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts index c76e6519..ba860a6b 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts @@ -1,5 +1,8 @@ -import type { ChatRequest } from 'ollama' -import type { DefaultOllamaModelMeta } from './models-meta' +import type { + OllamaChatRequest, + OllamaChatRequestMessages, + OllamaModelMeta, +} from './models-meta' const TULU3_LATEST = { name: 'tulu3:latest', @@ -10,7 +13,9 @@ const TULU3_LATEST = { }, size: '4.9gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const TULU3_8b = { name: 'tulu3:8b', @@ -21,7 +26,9 @@ const TULU3_8b = { }, size: '4.9gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> const TULU3_70b = { name: 'tulu3:70b', @@ -32,7 +39,9 @@ const TULU3_70b = { }, size: '43gb', context: 128_000, -} as const satisfies DefaultOllamaModelMeta +} as const satisfies OllamaModelMeta< + OllamaChatRequest & OllamaChatRequestMessages +> export const TULU3_MODELS = [ TULU3_LATEST.name, @@ -53,9 +62,15 @@ export const TULU3_MODELS = [ // Manual type map for per-model provider options export type Tulu3ChatModelProviderOptionsByName = { // Models with thinking and structured output support - [TULU3_LATEST.name]: ChatRequest 
-  [TULU3_8b.name]: ChatRequest
-  [TULU3_70b.name]: ChatRequest
+  [TULU3_LATEST.name]: OllamaModelMeta<
+    OllamaChatRequest & OllamaChatRequestMessages
+  >
+  [TULU3_8b.name]: OllamaModelMeta<
+    OllamaChatRequest & OllamaChatRequestMessages
+  >
+  [TULU3_70b.name]: OllamaModelMeta<
+    OllamaChatRequest & OllamaChatRequestMessages
+  >
 }
 
 export type Tulu3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/models-meta.ts b/packages/typescript/ai-ollama/src/meta/models-meta.ts
index 099432b4..971c502d 100644
--- a/packages/typescript/ai-ollama/src/meta/models-meta.ts
+++ b/packages/typescript/ai-ollama/src/meta/models-meta.ts
@@ -1,6 +1,8 @@
-export interface DefaultOllamaModelMeta<TProviderOptions = unknown> {
+import type { Options, Tool, ToolCall } from 'ollama'
+
+export interface OllamaModelMeta<TModelOptions = unknown> {
   name: string
-  providerOptions?: TProviderOptions
+  modelOptions?: TModelOptions
   supports?: {
     input?: Array<'text' | 'image' | 'video'>
     output?: Array<'text' | 'image' | 'video'>
@@ -9,3 +11,82 @@
   size?: string
   context?: number
 }
+
+// interface ChatRequest {
+//   model: string
+//   messages?: Message[]
+//   stream?: boolean
+//   format?: string | object
+//   keep_alive?: string | number
+//   tools?: Tool[]
+//   think?: boolean | 'high' | 'medium' | 'low'
+//   logprobs?: boolean
+//   top_logprobs?: number
+//   options?: Partial<Options>
+// }
+
+export interface OllamaChatRequest {
+  model: string
+  // messages?: Message[]
+  stream?: boolean
+  format?: string | object
+  keep_alive?: string | number
+  // tools?: Tool[]
+  // think?: boolean | 'high' | 'medium' | 'low'
+  logprobs?: boolean
+  top_logprobs?: number
+  options?: Partial<Options>
+}
+
+export interface OllamaChatRequestThinking {
+  think?: boolean
+}
+
+export interface OllamaChatRequestThinking_OpenAI {
+  think?: 'low' | 'medium' | 'high'
+}
+
+export interface OllamaChatRequestTools {
+  tools?: Array<Tool>
+}
+
+// interface Message {
+//   role: string
+//   content: string
+//   thinking?: string
+//   images?: Uint8Array[] | string[]
+//   tool_calls?: ToolCall[]
+//   tool_name?: string
+// }
+
+export interface OllamaChatRequestMessages<
+  TMessageExtension extends OllamaMessageExtension = undefined,
+> {
+  messages?: Array<
+    {
+      role: string
+      content: string
+      // thinking?: string
+      // images?: Uint8Array[] | string[]
+      // tool_calls?: ToolCall[]
+      // tool_name?: string
+    } & TMessageExtension
+  >
+}
+
+export interface OllamaMessageThinking {
+  thinking?: string
+}
+
+export interface OllamaMessageImages {
+  images?: Array<Uint8Array> | Array<string>
+}
+
+export interface OllamaMessageTools {
+  tool_calls?: Array<ToolCall>
+  tool_name?: string
+}
+
+type OllamaMessageExtension =
+  | Partial<OllamaMessageThinking & OllamaMessageImages & OllamaMessageTools>
+  | undefined
diff --git a/packages/typescript/ai-ollama/src/ollama-adapter.ts b/packages/typescript/ai-ollama/src/ollama-adapter.ts
index 0c457861..e977ac65 100644
--- a/packages/typescript/ai-ollama/src/ollama-adapter.ts
+++ b/packages/typescript/ai-ollama/src/ollama-adapter.ts
@@ -12,6 +12,7 @@ import type {
   Tool as OllamaTool,
   ToolCall,
 } from 'ollama'
+
 import type {
   ChatOptions,
   DefaultMessageMetadataByModality,

From dd904ca2822dc15f924404ad9bb7140afb97aeff Mon Sep 17 00:00:00 2001
From: Harry Whorlow
Date: Tue, 23 Dec 2025 10:24:14 +0100
Subject: [PATCH 3/7] fix rebase

---
 .../typescript/ai-ollama/src/adapters/text.ts | 28 +-
 packages/typescript/ai-ollama/src/index.ts | 2 +-
 .../typescript/ai-ollama/src/model-meta.ts | 2 +-
 .../ai-ollama/src/ollama-adapter.ts | 482 ------------------
 4 files changed, 3 insertions(+), 511 deletions(-)
 delete mode 100644
packages/typescript/ai-ollama/src/ollama-adapter.ts diff --git a/packages/typescript/ai-ollama/src/adapters/text.ts b/packages/typescript/ai-ollama/src/adapters/text.ts index 377bec9a..93b81931 100644 --- a/packages/typescript/ai-ollama/src/adapters/text.ts +++ b/packages/typescript/ai-ollama/src/adapters/text.ts @@ -16,33 +16,7 @@ import type { ToolCall, } from 'ollama' import type { StreamChunk, TextOptions, Tool } from '@tanstack/ai' - -/** - * Ollama text models - * Note: Ollama models are dynamically loaded, this is a common subset - */ -export const OllamaTextModels = [ - 'llama2', - 'llama3', - 'llama3.1', - 'llama3.2', - 'codellama', - 'mistral', - 'mixtral', - 'phi', - 'phi3', - 'neural-chat', - 'starling-lm', - 'orca-mini', - 'vicuna', - 'nous-hermes', - 'qwen2', - 'qwen2.5', - 'gemma', - 'gemma2', - 'deepseek-coder', - 'command-r', -] as const +import type { OllamaTextModels } from '../model-meta' export type OllamaTextModel = (typeof OllamaTextModels)[number] | (string & {}) diff --git a/packages/typescript/ai-ollama/src/index.ts b/packages/typescript/ai-ollama/src/index.ts index a919deb2..39d973af 100644 --- a/packages/typescript/ai-ollama/src/index.ts +++ b/packages/typescript/ai-ollama/src/index.ts @@ -5,13 +5,13 @@ // Text/Chat adapter export { OllamaTextAdapter, - OllamaTextModels, createOllamaChat, ollamaText, type OllamaTextAdapterOptions, type OllamaTextModel, type OllamaTextProviderOptions, } from './adapters/text' +export { OllamaTextModels } from './model-meta' // Summarize adapter export { diff --git a/packages/typescript/ai-ollama/src/model-meta.ts b/packages/typescript/ai-ollama/src/model-meta.ts index 1b9a3e8c..b02cb39e 100644 --- a/packages/typescript/ai-ollama/src/model-meta.ts +++ b/packages/typescript/ai-ollama/src/model-meta.ts @@ -130,7 +130,7 @@ import type { SmollmModelInputModalitiesByName } from './meta/model-meta-smollm' import type { TinnyllamaModelInputModalitiesByName } from './meta/model-meta-tinyllama' import type { Tulu3ModelInputModalitiesByName } from './meta/model-meta-tulu3' -export const OLLAMA_MODELS = [ +export const OllamaTextModels = [ ...ATHENE_MODELS, ...AYA_MODELS, ...CODEGEMMA_MODELS, diff --git a/packages/typescript/ai-ollama/src/ollama-adapter.ts b/packages/typescript/ai-ollama/src/ollama-adapter.ts deleted file mode 100644 index e977ac65..00000000 --- a/packages/typescript/ai-ollama/src/ollama-adapter.ts +++ /dev/null @@ -1,482 +0,0 @@ -import { Ollama as OllamaSDK } from 'ollama' -import { BaseAdapter, convertZodToJsonSchema } from '@tanstack/ai' - -import { OLLAMA_MODELS } from './model-meta' - -import type { OllamaModelInputModalitiesByName } from './model-meta' -import type { - AbortableAsyncIterator, - ChatRequest, - ChatResponse, - Message, - Tool as OllamaTool, - ToolCall, -} from 'ollama' - -import type { - ChatOptions, - DefaultMessageMetadataByModality, - EmbeddingOptions, - EmbeddingResult, - StreamChunk, - SummarizationOptions, - SummarizationResult, - Tool, -} from '@tanstack/ai' - -export interface OllamaConfig { - host?: string -} - -const OLLAMA_EMBEDDING_MODELS = [] as const - -/** - * Type-only map from Ollama model name to its provider-specific options. - * Ollama models share the same options interface. 
- */ -export type OllamaChatModelProviderOptionsByName = { - [K in (typeof OLLAMA_MODELS)[number]]: OllamaProviderOptions -} - -/** - * Ollama-specific provider options - * Based on Ollama API options - * @see https://github.com/ollama/ollama/blob/main/docs/api.md - */ -interface OllamaProviderOptions { - /** Number of tokens to keep from the prompt */ - num_keep?: number - /** Number of tokens from context to consider for next token prediction */ - top_k?: number - /** Minimum probability for nucleus sampling */ - min_p?: number - /** Tail-free sampling parameter */ - tfs_z?: number - /** Typical probability sampling parameter */ - typical_p?: number - /** Number of previous tokens to consider for repetition penalty */ - repeat_last_n?: number - /** Penalty for repeating tokens */ - repeat_penalty?: number - /** Enable Mirostat sampling (0=disabled, 1=Mirostat, 2=Mirostat 2.0) */ - mirostat?: number - /** Target entropy for Mirostat */ - mirostat_tau?: number - /** Learning rate for Mirostat */ - mirostat_eta?: number - /** Enable penalize_newline */ - penalize_newline?: boolean - /** Enable NUMA support */ - numa?: boolean - /** Context window size */ - num_ctx?: number - /** Batch size for prompt processing */ - num_batch?: number - /** Number of GQA groups (for some models) */ - num_gqa?: number - /** Number of GPU layers to use */ - num_gpu?: number - /** GPU to use for inference */ - main_gpu?: number - /** Use memory-mapped model */ - use_mmap?: boolean - /** Use memory-locked model */ - use_mlock?: boolean - /** Number of threads to use */ - num_thread?: number -} - -export class Ollama extends BaseAdapter< - typeof OLLAMA_MODELS, - typeof OLLAMA_EMBEDDING_MODELS, - OllamaProviderOptions, - Record, - OllamaChatModelProviderOptionsByName, - OllamaModelInputModalitiesByName, - DefaultMessageMetadataByModality -> { - name = 'ollama' as const - models = OLLAMA_MODELS - embeddingModels = OLLAMA_EMBEDDING_MODELS - - // Type-only map used by core AI to infer per-model provider options. - // This is never set at runtime; it exists purely for TypeScript. 
- declare _modelProviderOptionsByName: OllamaChatModelProviderOptionsByName - // Type-only map for model input modalities; used for multimodal content type constraints - declare _modelInputModalitiesByName: OllamaModelInputModalitiesByName - // Type-only map for message metadata types; used for type-safe metadata autocomplete - declare _messageMetadataByModality: DefaultMessageMetadataByModality - - private client: OllamaSDK - - constructor(config: OllamaConfig = {}) { - super({}) - this.client = new OllamaSDK({ - host: config.host || 'http://localhost:11434', - }) - } - - async *chatStream(options: ChatOptions): AsyncIterable { - // Use stream converter for now - // Map common options to Ollama format - const mappedOptions = this.mapCommonOptionsToOllama(options) - const response = await this.client.chat({ - ...mappedOptions, - stream: true, - }) - yield* this.processOllamaStreamChunks(response) - } - - async summarize(options: SummarizationOptions): Promise { - const prompt = this.buildSummarizationPrompt(options, options.text) - - const response = await this.client.generate({ - model: options.model || 'llama2', - prompt, - options: { - temperature: 0.3, - num_predict: options.maxLength || 500, - }, - stream: false, - }) - - const promptTokens = this.estimateTokens(prompt) - const completionTokens = this.estimateTokens(response.response) - - return { - id: this.generateId(), - model: response.model, - summary: response.response, - usage: { - promptTokens, - completionTokens, - totalTokens: promptTokens + completionTokens, - }, - } - } - - async createEmbeddings(options: EmbeddingOptions): Promise { - const inputs = Array.isArray(options.input) - ? options.input - : [options.input] - const embeddings: Array> = [] - - for (const input of inputs) { - const response = await this.client.embeddings({ - model: options.model || 'nomic-embed-text', - prompt: input, - }) - embeddings.push(response.embedding) - } - - const promptTokens = inputs.reduce( - (sum, input) => sum + this.estimateTokens(input), - 0, - ) - - return { - id: this.generateId(), - model: options.model || 'nomic-embed-text', - embeddings, - usage: { - promptTokens, - totalTokens: promptTokens, - }, - } - } - - private buildSummarizationPrompt( - options: SummarizationOptions, - text: string, - ): string { - let prompt = 'You are a professional summarizer. ' - - switch (options.style) { - case 'bullet-points': - prompt += 'Provide a summary in bullet point format. ' - break - case 'paragraph': - prompt += 'Provide a summary in paragraph format. ' - break - case 'concise': - prompt += 'Provide a very concise summary in 1-2 sentences. ' - break - default: - prompt += 'Provide a clear and concise summary. ' - } - - if (options.focus && options.focus.length > 0) { - prompt += `Focus on the following aspects: ${options.focus.join(', ')}. 
` - } - - prompt += `\n\nText to summarize:\n${text}\n\nSummary:` - - return prompt - } - - private estimateTokens(text: string): number { - // Rough approximation: 1 token ≈ 4 characters - return Math.ceil(text.length / 4) - } - - private async *processOllamaStreamChunks( - stream: AbortableAsyncIterator, - ): AsyncIterable { - let accumulatedContent = '' - const timestamp = Date.now() - const responseId: string = this.generateId() - let accumulatedReasoning = '' - let hasEmittedToolCalls = false - for await (const chunk of stream) { - function handleToolCall(toolCall: ToolCall): StreamChunk { - // we cast because the library types are missing id and index - const actualToolCall = toolCall as ToolCall & { - id: string - function: { index: number } - } - return { - type: 'tool_call', - id: responseId, - model: chunk.model, - timestamp, - toolCall: { - type: 'function', - id: actualToolCall.id, - function: { - name: actualToolCall.function.name || '', - arguments: - typeof actualToolCall.function.arguments === 'string' - ? actualToolCall.function.arguments - : JSON.stringify(actualToolCall.function.arguments), - }, - }, - index: actualToolCall.function.index, - } - } - if (chunk.done) { - if (chunk.message.tool_calls && chunk.message.tool_calls.length > 0) { - for (const toolCall of chunk.message.tool_calls) { - yield handleToolCall(toolCall) - hasEmittedToolCalls = true - } - yield { - type: 'done', - id: responseId || this.generateId(), - model: chunk.model, - timestamp, - finishReason: 'tool_calls', - } - continue - } - yield { - type: 'done', - id: responseId || this.generateId(), - model: chunk.model, - timestamp, - finishReason: hasEmittedToolCalls ? 'tool_calls' : 'stop', - } - continue - } - if (chunk.message.content) { - accumulatedContent += chunk.message.content - yield { - type: 'content', - id: responseId || this.generateId(), - model: chunk.model, - timestamp, - delta: chunk.message.content, - content: accumulatedContent, - role: 'assistant', - } - } - - if (chunk.message.tool_calls && chunk.message.tool_calls.length > 0) { - for (const toolCall of chunk.message.tool_calls) { - yield handleToolCall(toolCall) - hasEmittedToolCalls = true - } - } - if (chunk.message.thinking) { - accumulatedReasoning += chunk.message.thinking - yield { - type: 'thinking', - id: responseId || this.generateId(), - model: chunk.model, - timestamp, - content: accumulatedReasoning, - delta: chunk.message.thinking, - } - } - } - } - - /** - * Converts standard Tool format to Ollama-specific tool format - * Ollama uses OpenAI-compatible tool format - */ - private convertToolsToOllamaFormat( - tools?: Array, - ): Array | undefined { - if (!tools || tools.length === 0) { - return undefined - } - - return tools.map((tool) => ({ - type: 'function', - function: { - name: tool.name, - description: tool.description, - parameters: convertZodToJsonSchema(tool.inputSchema), - }, - })) - } - - /** - * Formats messages for Ollama, handling tool calls, tool results, and multimodal content - */ - private formatMessages(messages: ChatOptions['messages']): Array { - return messages.map((msg) => { - let textContent = '' - const images: Array = [] - - // Handle multimodal content - if (Array.isArray(msg.content)) { - for (const part of msg.content) { - if (part.type === 'text') { - textContent += part.content - } else if (part.type === 'image') { - // Ollama accepts base64 strings for images - if (part.source.type === 'data') { - images.push(part.source.value) - } else { - // URL-based images not directly supported, but 
we pass the URL - // Ollama may need the image to be fetched externally - images.push(part.source.value) - } - } - // Ollama doesn't support audio/video/document directly, skip them - } - } else { - textContent = msg.content || '' - } - - const hasToolCallId = msg.role === 'tool' && msg.toolCallId - return { - role: hasToolCallId ? 'tool' : msg.role, - content: hasToolCallId - ? typeof msg.content === 'string' - ? msg.content - : JSON.stringify(msg.content) - : textContent, - // Add images if present - ...(images.length > 0 ? { images: images } : {}), - ...(msg.role === 'assistant' && - msg.toolCalls && - msg.toolCalls.length > 0 - ? { - tool_calls: msg.toolCalls.map((toolCall) => { - // Parse string arguments to object for Ollama - let parsedArguments = {} - if (typeof toolCall.function.arguments === 'string') { - try { - parsedArguments = JSON.parse(toolCall.function.arguments) - } catch { - parsedArguments = {} - } - } else { - parsedArguments = toolCall.function.arguments - } - - return { - id: toolCall.id, - type: toolCall.type, - function: { - name: toolCall.function.name, - arguments: parsedArguments, - }, - } - }), - } - : {}), - } - }) - } - - /** - * Maps common options to Ollama-specific format - * Handles translation of normalized options to Ollama's API format - */ - private mapCommonOptionsToOllama(options: ChatOptions): ChatRequest { - const providerOptions = options.providerOptions as - | OllamaProviderOptions - | undefined - const ollamaOptions = { - temperature: options.options?.temperature, - top_p: options.options?.topP, - num_predict: options.options?.maxTokens, - ...providerOptions, - } - - return { - model: options.model, - options: ollamaOptions, - messages: this.formatMessages(options.messages), - tools: this.convertToolsToOllamaFormat(options.tools), - } - } -} - -/** - * Creates an Ollama adapter with simplified configuration - * @param host - Optional Ollama server host (defaults to http://localhost:11434) - * @returns A fully configured Ollama adapter instance - * - * @example - * ```typescript - * const ollama = createOllama(); - * // or with custom host - * const ollama = createOllama("http://localhost:11434"); - * - * const ai = new AI({ - * adapters: { - * ollama, - * } - * }); - * ``` - */ -export function createOllama( - host?: string, - config?: Omit, -): Ollama { - return new Ollama({ host, ...config }) -} - -/** - * Create an Ollama adapter with automatic host detection from environment variables. - * - * Looks for `OLLAMA_HOST` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * Falls back to default Ollama host if not found. - * - * @param config - Optional configuration (excluding host which is auto-detected) - * @returns Configured Ollama adapter instance - * - * @example - * ```typescript - * // Automatically uses OLLAMA_HOST from environment or defaults to http://localhost:11434 - * const aiInstance = ai(ollama()); - * ``` - */ -export function ollama(config?: Omit): Ollama { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? 
process.env - : undefined - const host = env?.OLLAMA_HOST - - return createOllama(host, config) -} From a76fe10cb8ad585954057573e892242c1c6a06c7 Mon Sep 17 00:00:00 2001 From: Harry Whorlow Date: Tue, 23 Dec 2025 20:57:09 +0100 Subject: [PATCH 4/7] updated model meta --- .../ai-ollama/src/adapters/summarize.ts | 24 +---- .../typescript/ai-ollama/src/adapters/text.ts | 2 +- packages/typescript/ai-ollama/src/index.ts | 2 +- .../ai-ollama/src/meta/model-meta-athene.ts | 16 ++-- .../ai-ollama/src/meta/model-meta-aya.ts | 8 +- .../src/meta/model-meta-codegemma.ts | 12 +-- .../src/meta/model-meta-codellama.ts | 20 ++--- .../src/meta/model-meta-command-r-plus.ts | 16 ++-- .../src/meta/model-meta-command-r.ts | 16 ++-- .../src/meta/model-meta-command-r7b.ts | 16 ++-- .../src/meta/model-meta-deepseek-coder-v2.ts | 12 +-- .../src/meta/model-meta-deepseek-ocr.ts | 10 +-- .../src/meta/model-meta-deepseek-r1.ts | 84 ++++++++--------- .../src/meta/model-meta-deepseek-v3.1.ts | 30 +++---- .../ai-ollama/src/meta/model-meta-devstral.ts | 16 ++-- .../ai-ollama/src/meta/model-meta-dolphin3.ts | 8 +- .../src/meta/model-meta-exaone3.5.ts | 16 +--- .../ai-ollama/src/meta/model-meta-falcon2.ts | 8 +- .../ai-ollama/src/meta/model-meta-falcon3.ts | 20 ++--- .../src/meta/model-meta-firefunction-v2.ts | 8 +- .../ai-ollama/src/meta/model-meta-gemma.ts | 12 +-- .../ai-ollama/src/meta/model-meta-gemma2.ts | 16 +--- .../ai-ollama/src/meta/model-meta-gemma3.ts | 30 +++---- .../src/meta/model-meta-granite3-dense.ts | 12 +-- .../src/meta/model-meta-granite3-guardian.ts | 12 +-- .../src/meta/model-meta-granite3-moe.ts | 24 ++--- .../src/meta/model-meta-granite3.1-dense.ts | 24 ++--- .../src/meta/model-meta-granite3.1-moe.ts | 24 ++--- .../src/meta/model-meta-llama-guard3.ts | 12 +-- .../ai-ollama/src/meta/model-meta-llama2.ts | 16 +--- .../src/meta/model-meta-llama3-chatqa.ts | 12 +-- .../src/meta/model-meta-llama3-gradient.ts | 12 +-- .../ai-ollama/src/meta/model-meta-llama3.1.ts | 32 +++---- .../src/meta/model-meta-llama3.2-vision.ts | 15 ++-- .../ai-ollama/src/meta/model-meta-llama3.2.ts | 24 ++--- .../ai-ollama/src/meta/model-meta-llama3.3.ts | 16 ++-- .../ai-ollama/src/meta/model-meta-llama3.ts | 12 +-- .../ai-ollama/src/meta/model-meta-llama4.ts | 24 ++--- .../src/meta/model-meta-llava-llama3.ts | 16 ++-- .../src/meta/model-meta-llava-phi3.ts | 10 +-- .../ai-ollama/src/meta/model-meta-llava.ts | 23 +++-- .../ai-ollama/src/meta/model-meta-marco-o1.ts | 8 +- .../src/meta/model-meta-mistral-large.ts | 16 ++-- .../src/meta/model-meta-mistral-nemo.ts | 16 ++-- .../src/meta/model-meta-mistral-small.ts | 24 ++--- .../ai-ollama/src/meta/model-meta-mistral.ts | 11 ++- .../ai-ollama/src/meta/model-meta-mixtral.ts | 24 ++--- .../src/meta/model-meta-moondream.ts | 10 +-- .../src/meta/model-meta-nemotron-mini.ts | 16 ++-- .../ai-ollama/src/meta/model-meta-nemotron.ts | 16 ++-- .../ai-ollama/src/meta/model-meta-olmo2.ts | 12 +-- .../src/meta/model-meta-opencoder.ts | 12 +-- .../src/meta/model-meta-openhermes.ts | 12 +-- .../ai-ollama/src/meta/model-meta-phi3.ts | 12 +-- .../ai-ollama/src/meta/model-meta-phi4.ts | 8 +- .../ai-ollama/src/meta/model-meta-qwen.ts | 32 ++----- .../src/meta/model-meta-qwen2.5-coder.ts | 56 +++++------- .../ai-ollama/src/meta/model-meta-qwen2.5.ts | 56 +++++------- .../ai-ollama/src/meta/model-meta-qwen2.ts | 44 ++++----- .../ai-ollama/src/meta/model-meta-qwen3.ts | 90 ++++++++----------- .../ai-ollama/src/meta/model-meta-qwq.ts | 16 ++-- .../ai-ollama/src/meta/model-meta-sailor2.ts | 16 +--- 
 .../src/meta/model-meta-shieldgemma.ts | 16 +---
 .../src/meta/model-meta-smalltinker.ts | 8 +-
 .../ai-ollama/src/meta/model-meta-smollm.ts | 16 +---
 .../src/meta/model-meta-tinyllama.ts | 8 +-
 .../ai-ollama/src/meta/model-meta-tulu3.ts | 12 +--
 67 files changed, 441 insertions(+), 848 deletions(-)

diff --git a/packages/typescript/ai-ollama/src/adapters/summarize.ts b/packages/typescript/ai-ollama/src/adapters/summarize.ts
index 3c9d6525..82ec81a9 100644
--- a/packages/typescript/ai-ollama/src/adapters/summarize.ts
+++ b/packages/typescript/ai-ollama/src/adapters/summarize.ts
@@ -5,6 +5,7 @@ import {
   getOllamaHostFromEnv,
 } from '../utils'
 
+import type { OllamaTextModels as OllamaSummarizeModels } from '../model-meta'
 import type { Ollama } from 'ollama'
 import type { SummarizeAdapter } from '@tanstack/ai/adapters'
 import type {
@@ -13,23 +14,6 @@ import type {
   SummarizationResult,
 } from '@tanstack/ai'
 
-/**
- * Ollama models suitable for summarization
- * Note: Ollama models are dynamically loaded, this is a common subset
- */
-export const OllamaSummarizeModels = [
-  'llama2',
-  'llama3',
-  'llama3.1',
-  'llama3.2',
-  'mistral',
-  'mixtral',
-  'phi',
-  'phi3',
-  'qwen2',
-  'qwen2.5',
-] as const
-
 export type OllamaSummarizeModel =
   | (typeof OllamaSummarizeModels)[number]
   | (string & {})
@@ -64,9 +48,9 @@ export interface OllamaSummarizeAdapterOptions {
  * Ollama Summarize Adapter
  * A tree-shakeable summarization adapter for Ollama
  */
-export class OllamaSummarizeAdapter<
-  TModel extends OllamaSummarizeModel,
-> implements SummarizeAdapter {
+export class OllamaSummarizeAdapter<TModel extends OllamaSummarizeModel>
+  implements SummarizeAdapter
+{
   readonly kind = 'summarize' as const
   readonly name = 'ollama' as const
   readonly model: TModel
diff --git a/packages/typescript/ai-ollama/src/adapters/text.ts b/packages/typescript/ai-ollama/src/adapters/text.ts
index 93b81931..8675c4d0 100644
--- a/packages/typescript/ai-ollama/src/adapters/text.ts
+++ b/packages/typescript/ai-ollama/src/adapters/text.ts
@@ -2,6 +2,7 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters'
 
 import { createOllamaClient, generateId, getOllamaHostFromEnv } from '../utils'
 
+import type { OllamaTextModels } from '../model-meta'
 import type {
   StructuredOutputOptions,
   StructuredOutputResult,
@@ -16,7 +17,6 @@ import type {
   ToolCall,
 } from 'ollama'
 import type { StreamChunk, TextOptions, Tool } from '@tanstack/ai'
-import type { OllamaTextModels } from '../model-meta'
 
 export type OllamaTextModel = (typeof OllamaTextModels)[number] | (string & {})
diff --git a/packages/typescript/ai-ollama/src/index.ts b/packages/typescript/ai-ollama/src/index.ts
index 39d973af..f49336c0 100644
--- a/packages/typescript/ai-ollama/src/index.ts
+++ b/packages/typescript/ai-ollama/src/index.ts
@@ -16,13 +16,13 @@ export { OllamaTextModels } from './model-meta'
 // Summarize adapter
 export {
   OllamaSummarizeAdapter,
-  OllamaSummarizeModels,
   createOllamaSummarize,
   ollamaSummarize,
   type OllamaSummarizeAdapterOptions,
   type OllamaSummarizeModel,
   type OllamaSummarizeProviderOptions,
 } from './adapters/summarize'
+export { OllamaTextModels as OllamaSummarizeModels } from './model-meta'
 
 // ===========================
 // Type Exports
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts
index de2820c9..d9171a3b 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-athene.ts
@@ -54,16 +54,12 @@ export const ATHENE_MODELS = [
 // Manual
type map for per-model provider options export type AtheneChatModelProviderOptionsByName = { // Models with thinking and structured output support - [ATHENE_V2_LATEST.name]: OllamaModelMeta< - OllamaChatRequest & - OllamaChatRequestMessages & - OllamaChatRequestTools - > - [ATHENE_V2_72b.name]: OllamaModelMeta< - OllamaChatRequest & - OllamaChatRequestMessages & - OllamaChatRequestTools - > + [ATHENE_V2_LATEST.name]: OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools + [ATHENE_V2_72b.name]: OllamaChatRequest & + OllamaChatRequestMessages & + OllamaChatRequestTools } export type AtheneModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts index c4f6f070..afa93948 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-aya.ts @@ -58,11 +58,9 @@ export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const // Manual type map for per-model provider options export type AyaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [AYA_LATEST.name]: OllamaModelMeta< - OllamaChatRequest & OllamaChatRequestMessages - > - [AYA_8b.name]: OllamaModelMeta - [AYA_35b.name]: OllamaModelMeta + [AYA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages + [AYA_8b.name]: OllamaChatRequest & OllamaChatRequestMessages + [AYA_35b.name]: OllamaChatRequest & OllamaChatRequestMessages } export type AyaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts index 0ef618a8..899cc077 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts @@ -62,15 +62,9 @@ export const CODEGEMMA_MODELS = [ // Manual type map for per-model provider options export type CodegemmaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [CODEGEMMA_LATEST.name]: OllamaModelMeta< - OllamaChatRequest & OllamaChatRequestMessages - > - [CODEGEMMA_8b.name]: OllamaModelMeta< - OllamaChatRequest & OllamaChatRequestMessages - > - [CODEGEMMA_35b.name]: OllamaModelMeta< - OllamaChatRequest & OllamaChatRequestMessages - > + [CODEGEMMA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages + [CODEGEMMA_8b.name]: OllamaChatRequest & OllamaChatRequestMessages + [CODEGEMMA_35b.name]: OllamaChatRequest & OllamaChatRequestMessages } export type CodegemmaModelInputModalitiesByName = { diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts index 3458c73b..c03fc14f 100644 --- a/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts +++ b/packages/typescript/ai-ollama/src/meta/model-meta-codellama.ts @@ -90,21 +90,11 @@ export const CODELLAMA_MODELS = [ // Manual type map for per-model provider options export type CodellamaChatModelProviderOptionsByName = { // Models with thinking and structured output support - [CODELLAMA_LATEST.name]: OllamaModelMeta< - OllamaChatRequest & OllamaChatRequestMessages - > - [CODELLAMA_7b.name]: OllamaModelMeta< - OllamaChatRequest & OllamaChatRequestMessages - > - [CODELLAMA_13b.name]: OllamaModelMeta< - OllamaChatRequest & OllamaChatRequestMessages - > - [CODELLAMA_34b.name]: OllamaModelMeta< - OllamaChatRequest & OllamaChatRequestMessages - > 
-  [CODELLAMA_70b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [CODELLAMA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [CODELLAMA_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [CODELLAMA_13b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [CODELLAMA_34b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [CODELLAMA_70b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type CodellamaModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts
index ebecf4f6..c053477c 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r-plus.ts
@@ -54,16 +54,12 @@ export const COMMAND_R_PLUS_MODELS = [
 // Manual type map for per-model provider options
 export type CommandRPlusChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [COMMAND_R_PLUS_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [COMMAND_R_PLUS_104b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [COMMAND_R_PLUS_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [COMMAND_R_PLUS_104b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type CommandRPlusModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts
index fc92dd12..84e74e59 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r.ts
@@ -54,16 +54,12 @@ export const COMMAND_R_MODELS = [
 // Manual type map for per-model provider options
 export type CommandRChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [COMMAND_R_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [COMMAND_R_35b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [COMMAND_R_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [COMMAND_R_35b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type CommandRModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts
index 55dbd7e0..f45d1774 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-command-r7b.ts
@@ -54,16 +54,12 @@ export const COMMAND_R_7b_MODELS = [
 // Manual type map for per-model provider options
 export type CommandR7bChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [COMMAND_R_7b_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [COMMAND_R_7b_7b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [COMMAND_R_7b_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [COMMAND_R_7b_7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type CommandR7bModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts
index 1f722652..7f9085a4 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-coder-v2.ts
@@ -62,15 +62,9 @@ export const DEEPSEEK_CODER_V2_MODELS = [
 // Manual type map for per-model provider options
 export type DeepseekCoderV2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [DEEPSEEK_CODER_V2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [DEEPSEEK_CODER_V2_16b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [DEEPSEEK_CODER_V2_236b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [DEEPSEEK_CODER_V2_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [DEEPSEEK_CODER_V2_16b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [DEEPSEEK_CODER_V2_236b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type DeepseekCoderV2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts
index dcfca427..4ed5428c 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-ocr.ts
@@ -50,12 +50,10 @@ export const DEEPSEEK_OCR_MODELS = [
 // Manual type map for per-model provider options
 export type DeepseekOcrChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [DEEPSEEK_OCR_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [DEEPSEEK_OCR_3b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [DEEPSEEK_OCR_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [DEEPSEEK_OCR_3b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
 }

 export type DeepseekOcrModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts
index d6ad2d3f..532b25cd 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts
@@ -19,7 +19,7 @@ const DEEPSEEK_R1_LATEST = {
   context: 128_000,
 } as const satisfies OllamaModelMeta<
   OllamaChatRequest &
-    OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
 >
@@ -35,7 +35,7 @@ const DEEPSEEK_R1_1_5b = {
   context: 128_000,
 } as const satisfies OllamaModelMeta<
   OllamaChatRequest &
-    OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
 >
@@ -51,7 +51,7 @@ const DEEPSEEK_R1_7b = {
   context: 128_000,
 } as const satisfies OllamaModelMeta<
   OllamaChatRequest &
-    OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
 >
@@ -67,7 +67,7 @@ const DEEPSEEK_R1_8b = {
   context: 128_000,
 } as const satisfies OllamaModelMeta<
   OllamaChatRequest &
-    OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
 >
@@ -83,7 +83,7 @@ const DEEPSEEK_R1_32b = {
   context: 128_000,
 } as const satisfies OllamaModelMeta<
   OllamaChatRequest &
-    OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
 >
@@ -99,7 +99,7 @@ const DEEPSEEK_R1_70b = {
   context: 128_000,
 } as const satisfies OllamaModelMeta<
   OllamaChatRequest &
-    OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
 >
@@ -115,7 +115,7 @@ const DEEPSEEK_R1_671b = {
   context: 128_000,
 } as const satisfies OllamaModelMeta<
   OllamaChatRequest &
-    OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
 >
@@ -143,48 +143,34 @@ export const DEEPSEEK_R1_MODELS = [
 // Manual type map for per-model provider options
 export type DeepseekR1ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [DEEPSEEK_R1_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [DEEPSEEK_R1_1_5b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [DEEPSEEK_R1_7b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [DEEPSEEK_R1_8b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [DEEPSEEK_R1_32b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [DEEPSEEK_R1_70b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [DEEPSEEK_R1_671b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
+  [DEEPSEEK_R1_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [DEEPSEEK_R1_1_5b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [DEEPSEEK_R1_7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [DEEPSEEK_R1_8b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [DEEPSEEK_R1_32b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [DEEPSEEK_R1_70b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [DEEPSEEK_R1_671b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
 }

 export type DeepseekR1ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts
index 899a7d51..6f20de7f 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-v3.1.ts
@@ -75,24 +75,18 @@ export const DEEPSEEK_V3_1_MODELS = [
 // Manual type map for per-model provider options
 export type Deepseekv3_1ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [DEEPSEEK_V3_1_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [DEEPSEEK_V3_1_671b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [DEEPSEEK_V3_1_671b_cloud.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
+  [DEEPSEEK_V3_1_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [DEEPSEEK_V3_1_671b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [DEEPSEEK_V3_1_671b_cloud.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
 }

 export type Deepseekv3_1ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts
index 2ec725d9..a8382d85 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-devstral.ts
@@ -54,16 +54,12 @@ export const DEVSTRAL_MODELS = [
 // Manual type map for per-model provider options
 export type DevstralChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [DEVSTRAL_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [DEVSTRAL_24b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [DEVSTRAL_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [DEVSTRAL_24b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type DevstralModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts
index a044a257..f427efad 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-dolphin3.ts
@@ -45,12 +45,8 @@ export const DOLPHIN3_MODELS = [DOLPHIN3_LATEST.name, DOLPHIN3_8b.name] as const
 // Manual type map for per-model provider options
 export type Dolphin3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [DOLPHIN3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [DOLPHIN3_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [DOLPHIN3_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [DOLPHIN3_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Dolphin3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts
index 7a2b84e6..22a12d2d 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-exaone3.5.ts
@@ -76,18 +76,10 @@ export const EXAONE3_5MODELS = [
 // Manual type map for per-model provider options
 export type Exaone3_5ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [EXAONE3_5_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [EXAONE3_5_2_4b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [EXAONE3_5_7_1b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [EXAONE3_5_32b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [EXAONE3_5_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [EXAONE3_5_2_4b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [EXAONE3_5_7_1b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [EXAONE3_5_32b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Exaone3_5ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts
index 8824d6fc..63ccaf27 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon2.ts
@@ -45,12 +45,8 @@ export const FALCON2_MODELS = [FALCON2_LATEST.name, FALCON2_11b.name] as const
 // Manual type map for per-model provider options
 export type Falcon2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [FALCON2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [FALCON2_11b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [FALCON2_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [FALCON2_11b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Falcon2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts
index d34dc4d5..54b00901 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-falcon3.ts
@@ -90,21 +90,11 @@ export const FALCON3_MODELS = [
 // Manual type map for per-model provider options
 export type Falcon3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [FALCON3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [FALCON3_1b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [FALCON3_3b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [FALCON3_7b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [FALCON3_10b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [FALCON3_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [FALCON3_1b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [FALCON3_3b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [FALCON3_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [FALCON3_10b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Falcon3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts
index f15d31b4..6a76801d 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-firefunction-v2.ts
@@ -48,12 +48,8 @@ export const FIREFUNCTION_V2_MODELS = [
 // Manual type map for per-model provider options
 export type Firefunction_V2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [FIREFUNCTION_V2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [FIREFUNCTION_V2_70b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [FIREFUNCTION_V2_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [FIREFUNCTION_V2_70b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Firefunction_V2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts
index 1d3cd037..e826beeb 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma.ts
@@ -62,15 +62,9 @@ export const GEMMA_MODELS = [
 // Manual type map for per-model provider options
 export type GemmaChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [GEMMA_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA_2b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA_7b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [GEMMA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GEMMA_2b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GEMMA_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type GemmaModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts
index 4b2f295a..30d86dcd 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma2.ts
@@ -76,18 +76,10 @@ export const GEMMA2_MODELS = [
 // Manual type map for per-model provider options
 export type Gemma2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [GEMMA2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA2_2b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA2_9b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA2_27b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [GEMMA2_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GEMMA2_2b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GEMMA2_9b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GEMMA2_27b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Gemma2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts
index 1eb1e59e..d16a7415 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-gemma3.ts
@@ -105,24 +105,18 @@ export const GEMMA3_MODELS = [
 // Manual type map for per-model provider options
 export type Gemma3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [GEMMA3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA3_270m.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA3_1b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA3_4b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA3_12b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GEMMA3_27b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [GEMMA3_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [GEMMA3_270m.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [GEMMA3_1b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [GEMMA3_4b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [GEMMA3_12b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [GEMMA3_27b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
 }

 export type Gemma3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts
index 545295bb..8f8f64df 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-dense.ts
@@ -62,15 +62,9 @@ export const GRANITE3_DENSE_MODELS = [
 // Manual type map for per-model provider options
 export type Granite3DenseChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [GRANITE3_DENSE_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GRANITE3_DENSE_2b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GRANITE3_DENSE_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [GRANITE3_DENSE_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GRANITE3_DENSE_2b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GRANITE3_DENSE_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Granite3DenseModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts
index 436939f1..f3950d2f 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-guardian.ts
@@ -62,15 +62,9 @@ export const GRANITE3_GUARDIAN_MODELS = [
 // Manual type map for per-model provider options
 export type Granite3GuardianChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [GRANITE3_GUARDIAN_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GRANITE3_GUARDIAN_2b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [GRANITE3_GUARDIAN_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [GRANITE3_GUARDIAN_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GRANITE3_GUARDIAN_2b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [GRANITE3_GUARDIAN_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Granite3GuardianModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts
index 1eaa6ec1..3d36f89f 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3-moe.ts
@@ -70,21 +70,15 @@ export const GRANITE3_MOE_MODELS = [
 // Manual type map for per-model provider options
 export type Granite3MoeChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [GRANITE3_MOE_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [GRANITE3_MOE_1b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [GRANITE3_MOE_3b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [GRANITE3_MOE_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [GRANITE3_MOE_1b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [GRANITE3_MOE_3b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Granite3MoeModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts
index 3cf4ce4c..4ed2cb39 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-dense.ts
@@ -70,21 +70,15 @@ export const GRANITE3_1_DENSE_MODELS = [
 // Manual type map for per-model provider options
 export type Granite3_1DenseChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [GRANITE3_1_DENSE_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [GRANITE3_1_DENSE_2b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [GRANITE3_1_DENSE_8b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [GRANITE3_1_DENSE_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [GRANITE3_1_DENSE_2b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [GRANITE3_1_DENSE_8b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Granite3_1DenseModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts
index e3b7792e..63a60e22 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-granite3.1-moe.ts
@@ -70,21 +70,15 @@ export const GRANITE3_1_MOE_MODELS = [
 // Manual type map for per-model provider options
 export type Granite3_1MoeChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [GRANITE3_1_MOE_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [GRANITE3_1_MOE_1b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [GRANITE3_1_MOE_3b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [GRANITE3_1_MOE_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [GRANITE3_1_MOE_1b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [GRANITE3_1_MOE_3b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Granite3_1MoeModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts
index 7f87af9f..2de47c4d 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama-guard3.ts
@@ -62,15 +62,9 @@ export const LLAMA_GUARD3_MODELS = [
 // Manual type map for per-model provider options
 export type LlamaGuard3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA_GUARD3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA_GUARD3_1b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA_GUARD3_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [LLAMA_GUARD3_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA_GUARD3_1b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA_GUARD3_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type LlamaGuard3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts
index 27be0978..3d7ee443 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama2.ts
@@ -76,18 +76,10 @@ export const LLAMA2_MODELS = [
 // Manual type map for per-model provider options
 export type Llama2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA2_7b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA2_13b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA2_70b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [LLAMA2_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA2_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA2_13b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA2_70b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Llama2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts
index ddf088b6..5333311a 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-chatqa.ts
@@ -60,15 +60,9 @@ export const LLAMA3_CHATQA_MODELS = [
 // Manual type map for per-model provider options
 export type Llama3ChatQaChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA3_CHATQA_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA3_CHATQA_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA3_CHATQA_70b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [LLAMA3_CHATQA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA3_CHATQA_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA3_CHATQA_70b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Llama3ChatQaModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts
index 3fb75963..f09446bd 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3-gradient.ts
@@ -60,15 +60,9 @@ export const LLAMA3_GRADIENT_MODELS = [
 // Manual type map for per-model provider options
 export type Llama3GradientChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA3_GRADIENT_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA3_GRADIENT_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA3_GRADIENT_70b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [LLAMA3_GRADIENT_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA3_GRADIENT_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA3_GRADIENT_70b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Llama3GradientModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts
index 8696b509..84dc7d26 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.1.ts
@@ -86,26 +86,18 @@ export const LLAMA3_1_MODELS = [
 // Manual type map for per-model provider options
 export type Llama3_1ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA3_1_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAMA3_1_8b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAMA3_1_70b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAMA3_1_405b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [LLAMA3_1_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAMA3_1_8b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAMA3_1_70b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAMA3_1_405b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Llama3_1ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts
index 3a94aec9..4431dd63 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2-vision.ts
@@ -63,15 +63,12 @@ export const LLAMA3_2_VISION_MODELS = [
 // Manual type map for per-model provider options
 export type Llama3_2VisionChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA3_2_VISION_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA3_2_VISION_11b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA3_2_VISION_90b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [LLAMA3_2_VISION_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [LLAMA3_2_VISION_11b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [LLAMA3_2_VISION_90b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
 }

 export type Llama3_2VisionModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts
index 206c5965..8eec88cd 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.2.ts
@@ -70,21 +70,15 @@ export const LLAMA3_2_MODELS = [
 // Manual type map for per-model provider options
 export type Llama3_2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA3_2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAMA3_2_1b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAMA3_2_3b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [LLAMA3_2_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAMA3_2_1b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAMA3_2_3b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Llama3_2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts
index 1ae18c80..3f6dfaa8 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.3.ts
@@ -54,16 +54,12 @@ export const LLAMA3_3_MODELS = [
 // Manual type map for per-model provider options
 export type Llama3_3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA3_3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAMA3_3_70b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [LLAMA3_3_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAMA3_3_70b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Llama3_3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts
index 79f54d69..e764ebec 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama3.ts
@@ -62,15 +62,9 @@ export const LLAMA3_MODELS = [
 // Manual type map for per-model provider options
 export type Llama3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA3_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAMA3_70b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [LLAMA3_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA3_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [LLAMA3_70b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Llama3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
index 1f816118..74b79960 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
@@ -72,21 +72,15 @@ export const LLAMA4_MODELS = [
 // Manual type map for per-model provider options
 export type Llama3_4ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAMA4_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAMA4_16X17b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAMA4_128X17b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [LLAMA4_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAMA4_16X17b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAMA4_128X17b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Llama3_4ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts
index f8a90719..1e6b6f5a 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-llama3.ts
@@ -51,16 +51,12 @@ export const LLAVA_LLAMA3_MODELS = [
 // Manual type map for per-model provider options
 export type LlavaLlamaChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAVA_LLAMA3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [LLAVA_LLAMA3_8b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [LLAVA_LLAMA3_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [LLAVA_LLAMA3_8b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type LlavaLlamaModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts
index be99034e..a01d6eaa 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava-phi3.ts
@@ -49,12 +49,10 @@ export const LLAVA_PHI3_MODELS = [
 // Manual type map for per-model provider options
 export type LlavaPhi3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAVA_PHI3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAVA_PHI3_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [LLAVA_PHI3_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [LLAVA_PHI3_8b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
 }

 export type LlavaPhi3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts
index 371e7c3e..f8e63c64 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llava.ts
@@ -77,18 +77,17 @@ export const LLAVA_MODELS = [
 // Manual type map for per-model provider options
 export type llavaChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [LLAVA_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAVA_7b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAVA_13b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [LLAVA_34b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [LLAVA_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+
+  [LLAVA_7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+
+  [LLAVA_13b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+
+  [LLAVA_34b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
 }

 export type llavaModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts
index aaeb10a4..2a9d9b80 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-marco-o1.ts
@@ -45,12 +45,8 @@ export const MARCO_O1_MODELS = [MARCO_O1_LATEST.name, MARCO_O1_7b.name] as const
 // Manual type map for per-model provider options
 export type MarcoO1ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [MARCO_O1_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [MARCO_O1_7b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [MARCO_O1_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [MARCO_O1_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type MarcoO1ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts
index 4c9d825c..aa403795 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-large.ts
@@ -54,16 +54,12 @@ export const MISTRAL_LARGE_MODELS = [
 // Manual type map for per-model provider options
 export type MistralLargeChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [MISTRAL_LARGE_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [MISTRAL_LARGE_123b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [MISTRAL_LARGE_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [MISTRAL_LARGE_123b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type MistralLargeModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts
index c1d3c4bd..1142949f 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-nemo.ts
@@ -54,16 +54,12 @@ export const MISTRAL_NEMO_MODELS = [
 // Manual type map for per-model provider options
 export type MistralNemoChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [MISTRAL_NEMO_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [MISTRAL_NEMO_12b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [MISTRAL_NEMO_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [MISTRAL_NEMO_12b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type MistralNemoModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts
index 2c48efb6..9a8f453d 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral-small.ts
@@ -70,21 +70,15 @@ export const MISTRAL_SMALL_MODELS = [
 // Manual type map for per-model provider options
 export type MistralSmallChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [MISTRAL_SMALL_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [MISTRAL_SMALL_22b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [MISTRAL_SMALL_24b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [MISTRAL_SMALL_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [MISTRAL_SMALL_22b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [MISTRAL_SMALL_24b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type MistralSmallModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts
index 51b7c27a..eb657ebb 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mistral.ts
@@ -46,12 +46,11 @@ export const MISTRAL_MODELS = [MISTRAL_LATEST.name, MISTRAL_7b.name] as const
 // Manual type map for per-model provider options
 export type MistralChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [MISTRAL_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [MISTRAL_7b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [MISTRAL_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+
+  [MISTRAL_7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
 }

 export type MistralModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts
index d1df33cb..df671729 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-mixtral.ts
@@ -70,21 +70,15 @@ export const MIXTRAL_MODELS = [
 // Manual type map for per-model provider options
 export type MixtralChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [MIXTRAL_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [MIXTRAL_8X7b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [MIXTRAL_8X22b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [MIXTRAL_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [MIXTRAL_8X7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [MIXTRAL_8X22b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type MixtralModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts
index 3b99aedb..73ef34e6 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-moondream.ts
@@ -49,12 +49,10 @@ export const MOONDREAM_MODELS = [
 // Manual type map for per-model provider options
 export type MoondreamChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [MOONDREAM_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [MOONDREAM_1_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [MOONDREAM_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
+  [MOONDREAM_1_8b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages
 }

 export type MoondreamModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts
index 6cda8fb4..3502dbe5 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron-mini.ts
@@ -54,16 +54,12 @@ export const NEMOTRON_MINI_MODELS = [
 // Manual type map for per-model provider options
 export type NemotronMiniChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [NEMOTRON_MINI_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [NEMOTRON_MINI_4b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [NEMOTRON_MINI_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [NEMOTRON_MINI_4b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type NemotronMiniModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts
index 58d3704e..358a0ce5 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-nemotron.ts
@@ -54,16 +54,12 @@ export const NEMOTRON_MODELS = [
 // Manual type map for per-model provider options
 export type NemotronChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [NEMOTRON_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [NEMOTRON_70b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [NEMOTRON_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [NEMOTRON_70b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type NemotronModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts
index ab2794fd..42386f40 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-olmo2.ts
@@ -62,15 +62,9 @@ export const OLMO2_MODELS = [
 // Manual type map for per-model provider options
 export type Olmo2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [OLMO2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [OLMO2_7b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [OLMO2_13b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [OLMO2_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [OLMO2_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [OLMO2_13b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Olmo2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts
index 114f2046..59192b92 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-opencoder.ts
@@ -62,15 +62,9 @@ export const OPENCODER_MODELS = [
 // Manual type map for per-model provider options
 export type OpencoderChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [OPENCODER_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [OPENCODER_1_5b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [OPENCODER_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [OPENCODER_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [OPENCODER_1_5b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [OPENCODER_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type OpencoderModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts
index 7c99a506..5f200ba0 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-openhermes.ts
@@ -62,15 +62,9 @@ export const OPENHERMES_MODELS = [
 // Manual type map for per-model provider options
 export type OpenhermesChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [OPENHERMES_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [OPENHERMES_V2.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [OPENHERMES_V2_5.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [OPENHERMES_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [OPENHERMES_V2.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [OPENHERMES_V2_5.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type OpenhermesModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts
index c0660a12..3c95c6c9 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi3.ts
@@ -62,15 +62,9 @@ export const PHI3_MODELS = [
 // Manual type map for per-model provider options
 export type Phi3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [PHI3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [PHI3_3_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [PHI3_14b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [PHI3_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [PHI3_3_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [PHI3_14b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Phi3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts
index b2dc3f7b..814376d1 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-phi4.ts
@@ -45,12 +45,8 @@ export const PHI4_MODELS = [PHI4_LATEST.name, PHI4_14b.name] as const
 // Manual type map for per-model provider options
 export type Phi4ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [PHI4_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [PHI4_14b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [PHI4_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [PHI4_14b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Phi4ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts
index 41d9ac3b..f8bd11a8 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen.ts
@@ -146,29 +146,15 @@ export const QWEN_MODELS = [
 // Manual type map for per-model provider options
 export type QwenChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [QWEN_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [QWEN_0_5b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [QWEN_1_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [QWEN_4b.name]: OllamaModelMeta
-  [QWEN_7b.name]: OllamaModelMeta
-  [QWEN_14b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [QWEN_32b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [QWEN_72b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [QWEN_110b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [QWEN_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [QWEN_0_5b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [QWEN_1_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [QWEN_4b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [QWEN_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [QWEN_14b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [QWEN_32b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [QWEN_72b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [QWEN_110b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type QwenModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts
index c794797f..499ebdb5 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5-coder.ts
@@ -133,41 +133,27 @@ export const QWEN2_5_CODER_MODELS = [
 // Manual type map for per-model provider options
 export type Qwen2_5CoderChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [QWEN2_5_CODER_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_CODER_0_5b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_CODER_1_5b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_CODER_3b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_CODER_7b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_CODER_14b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_CODER_32b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [QWEN2_5_CODER_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_CODER_0_5b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_CODER_1_5b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_CODER_3b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_CODER_7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_CODER_14b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_CODER_32b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Qwen2_5CoderModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts
index 0fbc6e9b..7321f1c0 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.5.ts
@@ -134,41 +134,27 @@ export const QWEN2_5_MODELS = [
 // Manual type map for per-model provider options
 export type Qwen2_5ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [QWEN2_5_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_0_5b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_1_5b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_3b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_7b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_32b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWEN2_5_72b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [QWEN2_5_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_0_5b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_1_5b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_3b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_32b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_5_72b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Qwen2_5ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts
index 59554e65..a906792a 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen2.ts
@@ -102,35 +102,21 @@ export const QWEN2_MODELS = [
 // Manual type map for per-model provider options
 export type Qwen2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [QWEN2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-
-  [QWEN2_0_5b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-
-  [QWEN2_1_5b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-
-  [QWEN2_7b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-
-  [QWEN2_72b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [QWEN2_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_0_5b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_1_5b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWEN2_72b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type Qwen2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts
index e1cd4a57..95be1766 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwen3.ts
@@ -177,60 +177,42 @@ export const QWEN3_MODELS = [
 // Manual type map for per-model provider options
 export type Qwen3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [QWEN3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [QWEN3_0_6b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [QWEN3_1_7b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [QWEN3_4b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [QWEN3_8b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [QWEN3_14b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [QWEN3_30b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [QWEN3_32b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
-  [QWEN3_235b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools &
-      OllamaChatRequestThinking
-  >
+  [QWEN3_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [QWEN3_0_6b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [QWEN3_1_7b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [QWEN3_4b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [QWEN3_8b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [QWEN3_14b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [QWEN3_30b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [QWEN3_32b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
+  [QWEN3_235b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools &
+    OllamaChatRequestThinking
 }

 export type Qwen3ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts
index cd2519aa..b6a416d4 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-qwq.ts
@@ -51,16 +51,12 @@ export const QWQ_MODELS = [QWQ_LATEST.name, QWQ_32b.name] as const
 // Manual type map for per-model provider options
 export type QwqChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [QWQ_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
-  [QWQ_32b.name]: OllamaModelMeta<
-    OllamaChatRequest &
-      OllamaChatRequestMessages &
-      OllamaChatRequestTools
-  >
+  [QWQ_LATEST.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
+  [QWQ_32b.name]: OllamaChatRequest &
+    OllamaChatRequestMessages &
+    OllamaChatRequestTools
 }

 export type QwqModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts
index 12a948b6..963ff801 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-sailor2.ts
@@ -74,18 +74,10 @@ export const SAILOR2_MODELS = [
 // Manual type map for per-model provider options
 export type Sailor2ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [SAILOR2_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SAILOR2_1b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SAILOR2_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SAILOR2_20b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [SAILOR2_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SAILOR2_1b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SAILOR2_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SAILOR2_20b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Sailor2ModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts
index 7a123bbb..91cc897a 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-shieldgemma.ts
@@ -76,18 +76,10 @@ export const SHIELDGEMMA_MODELS = [
 // Manual type map for per-model provider options
 export type ShieldgemmaChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [SHIELDGEMMA_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SHIELDGEMMA_2b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SHIELDGEMMA_9b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SHIELDGEMMA_27b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [SHIELDGEMMA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SHIELDGEMMA_2b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SHIELDGEMMA_9b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SHIELDGEMMA_27b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type ShieldgemmaModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts
index e16cab2b..a5923bee 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-smalltinker.ts
@@ -48,12 +48,8 @@ export const SMALLTINKER_MODELS = [
 // Manual type map for per-model provider options
 export type SmalltinkerChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [SMALLTINKER_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SMALLTINKER_3b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [SMALLTINKER_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SMALLTINKER_3b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type SmalltinkerModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts
index 09928c11..aaac4d7a 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-smollm.ts
@@ -76,18 +76,10 @@ export const SMOLLM_MODELS = [
 // Manual type map for per-model provider options
 export type SmollmChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [SMOLLM_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SMOLLM_135m.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SMOLLM_360m.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [SMOLLM_1_7b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [SMOLLM_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SMOLLM_135m.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SMOLLM_360m.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [SMOLLM_1_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type SmollmModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts
index 62f9efa6..dd19b63a 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-tinyllama.ts
@@ -48,12 +48,8 @@ export const TINNYLLAMA_MODELS = [
 // Manual type map for per-model provider options
 export type TinnyllamaChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [TINNYLLAMA_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [TINNYLLAMA_1_1b.name]: OllamaModelMeta<
-    OllamaChatRequest &
+  [TINNYLLAMA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [TINNYLLAMA_1_1b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type TinnyllamaModelInputModalitiesByName = {
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts
index ba860a6b..caeed497 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-tulu3.ts
@@ -62,15 +62,9 @@ export const TULU3_MODELS = [
 // Manual type map for per-model provider options
 export type Tulu3ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
-  [TULU3_LATEST.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [TULU3_8b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
-  [TULU3_70b.name]: OllamaModelMeta<
-    OllamaChatRequest & OllamaChatRequestMessages
-  >
+  [TULU3_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [TULU3_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
+  [TULU3_70b.name]: OllamaChatRequest & OllamaChatRequestMessages
 }

 export type Tulu3ModelInputModalitiesByName = {

From 7ab302ae551d1ca241f754c0cb036d8c8ae0928f Mon Sep 17 00:00:00 2001
From: Harry Whorlow
Date: Tue, 23 Dec 2025 20:59:47 +0100
Subject: [PATCH 5/7] fix: remove unused import and correct type indentation

---
 .../ai-ollama/src/meta/model-meta-deepseek-r1.ts | 14 +++++++-------
 .../ai-ollama/src/meta/model-meta-llama4.ts      |  1 -
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts
index 532b25cd..84277a69 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-deepseek-r1.ts
@@ -144,31 +144,31 @@ export const DEEPSEEK_R1_MODELS = [
 export type DeepseekR1ChatModelProviderOptionsByName = {
   // Models with thinking and structured output support
   [DEEPSEEK_R1_LATEST.name]: OllamaChatRequest &
-      OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
   [DEEPSEEK_R1_1_5b.name]: OllamaChatRequest &
-      OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
   [DEEPSEEK_R1_7b.name]: OllamaChatRequest &
-      OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
   [DEEPSEEK_R1_8b.name]: OllamaChatRequest &
-      OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
   [DEEPSEEK_R1_32b.name]: OllamaChatRequest &
-      OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
   [DEEPSEEK_R1_70b.name]: OllamaChatRequest &
-      OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
   [DEEPSEEK_R1_671b.name]: OllamaChatRequest &
-      OllamaChatRequestMessages &
+    OllamaChatRequestMessages &
     OllamaChatRequestTools &
     OllamaChatRequestThinking
 }
diff --git a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
index 74b79960..06c1abd3 100644
--- a/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
+++ b/packages/typescript/ai-ollama/src/meta/model-meta-llama4.ts
@@ -1,4 +1,3 @@
-import type { ChatRequest } from 'ollama'
 import type {
   OllamaChatRequest,
   OllamaChatRequestMessages,

From fb43bae144095711686ecad2bd7fb69e5bee0b3a Mon Sep 17 00:00:00 2001
From: Harry Whorlow
Date: Tue, 23 Dec 2025 21:13:53 +0100
Subject: [PATCH 6/7] refactor: rename OllamaTextModels to OLLAMA_TEXT_MODELS
 and export input modalities type

---
 packages/typescript/ai-ollama/src/adapters/summarize.ts | 2 +-
 packages/typescript/ai-ollama/src/adapters/text.ts      | 6 ++++--
 packages/typescript/ai-ollama/src/index.ts              | 6 ++++--
 packages/typescript/ai-ollama/src/model-meta.ts         | 2 +-
 4 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/packages/typescript/ai-ollama/src/adapters/summarize.ts b/packages/typescript/ai-ollama/src/adapters/summarize.ts
index 82ec81a9..4278e498 100644
--- a/packages/typescript/ai-ollama/src/adapters/summarize.ts
+++ b/packages/typescript/ai-ollama/src/adapters/summarize.ts
@@ -5,7 +5,7 @@ import {
   getOllamaHostFromEnv,
 } from '../utils'

-import type { OllamaTextModels as OllamaSummarizeModels } from '../model-meta'
+import type { OLLAMA_TEXT_MODELS as OllamaSummarizeModels } from '../model-meta'
 import type { Ollama } from 'ollama'
 import type { SummarizeAdapter } from '@tanstack/ai/adapters'
 import type {
diff --git a/packages/typescript/ai-ollama/src/adapters/text.ts b/packages/typescript/ai-ollama/src/adapters/text.ts
index 8675c4d0..82d9132e 100644
--- a/packages/typescript/ai-ollama/src/adapters/text.ts
+++ b/packages/typescript/ai-ollama/src/adapters/text.ts
@@ -2,7 +2,7 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters'

 import { createOllamaClient, generateId, getOllamaHostFromEnv } from '../utils'

-import type { OllamaTextModels } from '../model-meta'
+import type { OLLAMA_TEXT_MODELS } from '../model-meta'
 import type {
   StructuredOutputOptions,
   StructuredOutputResult,
@@ -18,7 +18,9 @@ import type {
 } from 'ollama'
 import type { StreamChunk, TextOptions, Tool } from '@tanstack/ai'

-export type OllamaTextModel = (typeof OllamaTextModels)[number] | (string & {})
+export type OllamaTextModel =
+  | (typeof OLLAMA_TEXT_MODELS)[number]
+  | (string & {})

 /**
  * Ollama-specific provider options
diff --git a/packages/typescript/ai-ollama/src/index.ts b/packages/typescript/ai-ollama/src/index.ts
index f49336c0..238194a2 100644
--- a/packages/typescript/ai-ollama/src/index.ts
+++ b/packages/typescript/ai-ollama/src/index.ts
@@ -11,7 +11,7 @@ export {
   type OllamaTextModel,
   type OllamaTextProviderOptions,
 } from './adapters/text'
-export { OllamaTextModels } from './model-meta'
+export { OLLAMA_TEXT_MODELS as OllamaTextModels } from './model-meta'

 // Summarize adapter
 export {
@@ -22,7 +22,7 @@ export {
   type OllamaSummarizeModel,
   type OllamaSummarizeProviderOptions,
 } from './adapters/summarize'
-export { OllamaTextModels as OllamaSummarizeModels } from './model-meta'
+export { OLLAMA_TEXT_MODELS as OllamaSummarizeModels } from './model-meta'

 // ===========================
 // Type Exports
@@ -35,3 +35,5 @@ export type {
   OllamaDocumentMetadata,
   OllamaMessageMetadataByModality,
 } from './message-types'
+
+export type { OllamaModelInputModalitiesByName } from './model-meta'
diff --git a/packages/typescript/ai-ollama/src/model-meta.ts b/packages/typescript/ai-ollama/src/model-meta.ts
index b02cb39e..8e2bdff1 100644
--- a/packages/typescript/ai-ollama/src/model-meta.ts
+++ b/packages/typescript/ai-ollama/src/model-meta.ts
@@ -130,7 +130,7 @@ import type { SmollmModelInputModalitiesByName } from './meta/model-meta-smollm'
 import type { TinnyllamaModelInputModalitiesByName } from './meta/model-meta-tinyllama'
 import type { Tulu3ModelInputModalitiesByName } from './meta/model-meta-tulu3'

-export const OllamaTextModels = [
+export const OLLAMA_TEXT_MODELS = [
   ...ATHENE_MODELS,
   ...AYA_MODELS,
   ...CODEGEMMA_MODELS,

From 8e40285841dce77d49154f37696cc061a51fadb0 Mon Sep 17 00:00:00 2001
From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com>
Date: Tue, 23 Dec 2025 20:15:29 +0000
Subject: [PATCH 7/7] ci: apply automated fixes

---
 packages/typescript/ai-ollama/src/adapters/summarize.ts | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/packages/typescript/ai-ollama/src/adapters/summarize.ts b/packages/typescript/ai-ollama/src/adapters/summarize.ts
index 4278e498..cf17d681 100644
--- a/packages/typescript/ai-ollama/src/adapters/summarize.ts
+++ b/packages/typescript/ai-ollama/src/adapters/summarize.ts
@@ -48,9 +48,9 @@ export interface OllamaSummarizeAdapterOptions {
  * Ollama Summarize Adapter
  * A tree-shakeable summarization adapter for Ollama
  */
-export class OllamaSummarizeAdapter<TModel extends OllamaSummarizeModel>
-  implements SummarizeAdapter<TModel>
-{
+export class OllamaSummarizeAdapter<
+  TModel extends OllamaSummarizeModel,
+> implements SummarizeAdapter<TModel> {
   readonly kind = 'summarize' as const
   readonly name = 'ollama' as const
   readonly model: TModel
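
For reviewers, a minimal downstream sketch of the public surface patches 6 and 7 touch. The `@tanstack/ai-ollama` package name is an assumption inferred from the repo layout, and `my-org/custom-model:latest` is a made-up tag; the exports themselves mirror src/index.ts and src/adapters/text.ts above.

// Sketch only: OLLAMA_TEXT_MODELS is re-exported under its old
// OllamaTextModels name, so existing call sites keep compiling.
import { OllamaTextModels } from '@tanstack/ai-ollama'
import type { OllamaTextModel } from '@tanstack/ai-ollama'

// Entries of the const array stay literal-typed, so known model
// names narrow and autocomplete in editors.
const known: OllamaTextModel = OllamaTextModels[0]

// The `(string & {})` branch keeps the union open for unlisted
// local models without widening the literal members to plain string.
const custom: OllamaTextModel = 'my-org/custom-model:latest' // hypothetical tag

console.log(known, custom)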