18 changes: 1 addition & 17 deletions packages/typescript/ai-ollama/src/adapters/summarize.ts
@@ -5,6 +5,7 @@ import {
getOllamaHostFromEnv,
} from '../utils'

import type { OLLAMA_TEXT_MODELS as OllamaSummarizeModels } from '../model-meta'
import type { Ollama } from 'ollama'
import type { SummarizeAdapter } from '@tanstack/ai/adapters'
import type {
@@ -13,23 +14,6 @@ import type {
SummarizationResult,
} from '@tanstack/ai'

/**
* Ollama models suitable for summarization
* Note: Ollama models are dynamically loaded, this is a common subset
*/
export const OllamaSummarizeModels = [
'llama2',
'llama3',
'llama3.1',
'llama3.2',
'mistral',
'mixtral',
'phi',
'phi3',
'qwen2',
'qwen2.5',
] as const

export type OllamaSummarizeModel =
| (typeof OllamaSummarizeModels)[number]
| (string & {})
32 changes: 4 additions & 28 deletions packages/typescript/ai-ollama/src/adapters/text.ts
@@ -2,6 +2,7 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters'

import { createOllamaClient, generateId, getOllamaHostFromEnv } from '../utils'

import type { OLLAMA_TEXT_MODELS } from '../model-meta'
import type {
StructuredOutputOptions,
StructuredOutputResult,
@@ -17,34 +18,9 @@ import type {
} from 'ollama'
import type { StreamChunk, TextOptions, Tool } from '@tanstack/ai'

/**
* Ollama text models
* Note: Ollama models are dynamically loaded, this is a common subset
*/
export const OllamaTextModels = [
'llama2',
'llama3',
'llama3.1',
'llama3.2',
'codellama',
'mistral',
'mixtral',
'phi',
'phi3',
'neural-chat',
'starling-lm',
'orca-mini',
'vicuna',
'nous-hermes',
'qwen2',
'qwen2.5',
'gemma',
'gemma2',
'deepseek-coder',
'command-r',
] as const

export type OllamaTextModel = (typeof OllamaTextModels)[number] | (string & {})
export type OllamaTextModel =
| (typeof OLLAMA_TEXT_MODELS)[number]
| (string & {})

/**
* Ollama-specific provider options
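The widened `OllamaTextModel` type above relies on the `(string & {})` intersection: the literal names from `OLLAMA_TEXT_MODELS` keep driving editor autocomplete while any locally pulled model tag is still accepted. A minimal, self-contained sketch of the pattern (the model names here are illustrative, not the actual contents of `OLLAMA_TEXT_MODELS`):

```ts
// Sketch only: KNOWN_MODELS stands in for the real OLLAMA_TEXT_MODELS list.
const KNOWN_MODELS = ['llama3.2', 'mistral'] as const

// `(string & {})` keeps the literal members from collapsing into plain
// `string`, so editors still offer 'llama3.2' | 'mistral' as completions.
type KnownModel = (typeof KNOWN_MODELS)[number] | (string & {})

const picked: KnownModel = 'llama3.2' // completion offered
const custom: KnownModel = 'my-finetune:latest' // still allowed — Ollama models are user-installed
```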
6 changes: 4 additions & 2 deletions packages/typescript/ai-ollama/src/index.ts
@@ -5,24 +5,24 @@
// Text/Chat adapter
export {
OllamaTextAdapter,
OllamaTextModels,
createOllamaChat,
ollamaText,
type OllamaTextAdapterOptions,
type OllamaTextModel,
type OllamaTextProviderOptions,
} from './adapters/text'
export { OLLAMA_TEXT_MODELS as OllamaTextModels } from './model-meta'

// Summarize adapter
export {
OllamaSummarizeAdapter,
OllamaSummarizeModels,
createOllamaSummarize,
ollamaSummarize,
type OllamaSummarizeAdapterOptions,
type OllamaSummarizeModel,
type OllamaSummarizeProviderOptions,
} from './adapters/summarize'
export { OLLAMA_TEXT_MODELS as OllamaSummarizeModels } from './model-meta'

// ===========================
// Type Exports
@@ -35,3 +35,5 @@ export type {
OllamaDocumentMetadata,
OllamaMessageMetadataByModality,
} from './message-types'

export type { OllamaModelInputModalitiesByName } from './model-meta'
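For context, a rough sketch of how a consumer might use the re-exported names after this change; it assumes the package is consumed as `@tanstack/ai-ollama` (inferred from the package path, not confirmed by this diff) and that `OllamaTextModels` and `OllamaSummarizeModels` both resolve to the shared `OLLAMA_TEXT_MODELS` list, as the exports above indicate:

```ts
// Sketch only — the '@tanstack/ai-ollama' specifier is an assumption.
import {
  OllamaTextModels,
  type OllamaSummarizeModel,
  type OllamaTextModel,
} from '@tanstack/ai-ollama'

// The runtime list now comes from the centralized model metadata…
for (const model of OllamaTextModels) {
  console.log(model)
}

// …while the widened types still accept arbitrary local model tags.
const summarizer: OllamaSummarizeModel = 'llama3.2'
const custom: OllamaTextModel = 'my-finetune:latest'
```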
69 changes: 69 additions & 0 deletions packages/typescript/ai-ollama/src/meta/model-meta-athene.ts
@@ -0,0 +1,69 @@
import type {
OllamaChatRequest,
OllamaChatRequestMessages,
OllamaChatRequestTools,
OllamaMessageTools,
OllamaModelMeta,
} from './models-meta'

const ATHENE_V2_LATEST = {
name: 'athene-v2:latest',
supports: {
input: ['text'],
output: ['text'],
capabilities: ['tools'],
},
size: '47gb',
context: 32_000,
} as const satisfies OllamaModelMeta<
OllamaChatRequest &
OllamaChatRequestMessages<OllamaMessageTools> &
OllamaChatRequestTools
>

const ATHENE_V2_72b = {
name: 'athene-v2:72b',
supports: {
input: ['text'],
output: ['text'],
capabilities: ['tools'],
},
size: '47gb',
context: 32_000,
} as const satisfies OllamaModelMeta<
OllamaChatRequest &
OllamaChatRequestMessages<OllamaMessageTools> &
OllamaChatRequestTools
>

export const ATHENE_MODELS = [
ATHENE_V2_LATEST.name,
ATHENE_V2_72b.name,
] as const

// const ATHENE_IMAGE_MODELS = [] as const

// export const ATHENE_EMBEDDING_MODELS = [] as const

// const ATHENE_AUDIO_MODELS = [] as const

// const ATHENE_VIDEO_MODELS = [] as const

// export type AtheneChatModels = (typeof ATHENE_MODELS)[number]

// Manual type map for per-model provider options
export type AtheneChatModelProviderOptionsByName = {
// Models with tool calling support
[ATHENE_V2_LATEST.name]: OllamaChatRequest &
OllamaChatRequestMessages<OllamaMessageTools> &
OllamaChatRequestTools
[ATHENE_V2_72b.name]: OllamaChatRequest &
OllamaChatRequestMessages<OllamaMessageTools> &
OllamaChatRequestTools
}

export type AtheneModelInputModalitiesByName = {
// Text-only input
[ATHENE_V2_LATEST.name]: typeof ATHENE_V2_LATEST.supports.input
[ATHENE_V2_72b.name]: typeof ATHENE_V2_72b.supports.input
}
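The metadata entries in this file (and the per-family files below) lean on the `as const satisfies OllamaModelMeta<...>` pattern: `satisfies` validates each object against the meta shape without widening it, so the literal `name` survives and can be used as a key in the `...ByName` type maps. A small sketch of the idea, using a stand-in `ModelMeta` interface rather than the real `OllamaModelMeta`:

```ts
// Stand-in types for illustration; not the real OllamaModelMeta definition.
interface ModelMeta {
  name: string
  context: number
}

const EXAMPLE_LATEST = {
  name: 'example:latest',
  context: 8_000,
} as const satisfies ModelMeta
// With a plain `: ModelMeta` annotation the name would widen to `string`;
// `as const satisfies` keeps it as the literal 'example:latest'.

export type ExampleContextByName = {
  [EXAMPLE_LATEST.name]: typeof EXAMPLE_LATEST.context // { 'example:latest': 8000 }
}
```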
71 changes: 71 additions & 0 deletions packages/typescript/ai-ollama/src/meta/model-meta-aya.ts
@@ -0,0 +1,71 @@
import type {
OllamaChatRequest,
OllamaChatRequestMessages,
OllamaModelMeta,
} from './models-meta'

const AYA_LATEST = {
name: 'aya:latest',
supports: {
input: ['text'],
output: ['text'],
capabilities: [],
},
size: '4.8gb',
context: 8_000,
} as const satisfies OllamaModelMeta<
OllamaChatRequest & OllamaChatRequestMessages
>

const AYA_8b = {
name: 'aya:8b',
supports: {
input: ['text'],
output: ['text'],
capabilities: [],
},
size: '4.8gb',
context: 8_000,
} as const satisfies OllamaModelMeta<
OllamaChatRequest & OllamaChatRequestMessages
>

const AYA_35b = {
name: 'aya:35b',
supports: {
input: ['text'],
output: ['text'],
capabilities: [],
},
size: '20gb',
context: 8_000,
} as const satisfies OllamaModelMeta<
OllamaChatRequest & OllamaChatRequestMessages
>

export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const

// const AYA_IMAGE_MODELS = [] as const

// export const AYA_EMBEDDING_MODELS = [] as const

// const AYA_AUDIO_MODELS = [] as const

// const AYA_VIDEO_MODELS = [] as const

// export type AyaChatModels = (typeof AYA_MODELS)[number]

// Manual type map for per-model provider options
export type AyaChatModelProviderOptionsByName = {
// Text-only chat models with no extra capabilities
[AYA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
[AYA_8b.name]: OllamaChatRequest & OllamaChatRequestMessages
[AYA_35b.name]: OllamaChatRequest & OllamaChatRequestMessages
}

export type AyaModelInputModalitiesByName = {
// Text-only input
[AYA_LATEST.name]: typeof AYA_LATEST.supports.input
[AYA_8b.name]: typeof AYA_8b.supports.input
[AYA_35b.name]: typeof AYA_35b.supports.input
}
75 changes: 75 additions & 0 deletions packages/typescript/ai-ollama/src/meta/model-meta-codegemma.ts
@@ -0,0 +1,75 @@
import type {
OllamaChatRequest,
OllamaChatRequestMessages,
OllamaModelMeta,
} from './models-meta'

const CODEGEMMA_LATEST = {
name: 'codegemma:latest',
supports: {
input: ['text'],
output: ['text'],
capabilities: [],
},
size: '5gb',
context: 8_000,
} as const satisfies OllamaModelMeta<
OllamaChatRequest & OllamaChatRequestMessages
>

const CODEGEMMA_2b = {
name: 'codegemma:2b',
supports: {
input: ['text'],
output: ['text'],
capabilities: [],
},
size: '1.65gb',
context: 8_000,
} as const satisfies OllamaModelMeta<
OllamaChatRequest & OllamaChatRequestMessages
>

const CODEGEMMA_7b = {
name: 'codegemma:7b',
supports: {
input: ['text'],
output: ['text'],
capabilities: [],
},
size: '5gb',
context: 8_000,
} as const satisfies OllamaModelMeta<
OllamaChatRequest & OllamaChatRequestMessages
>

export const CODEGEMMA_MODELS = [
CODEGEMMA_LATEST.name,
CODEGEMMA_2b.name,
CODEGEMMA_7b.name,
] as const

// const CODEGEMMA_IMAGE_MODELS = [] as const

// export const CODEGEMMA_EMBEDDING_MODELS = [] as const

// const CODEGEMMA_AUDIO_MODELS = [] as const

// const CODEGEMMA_VIDEO_MODELS = [] as const

// export type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number]

// Manual type map for per-model provider options
export type CodegemmaChatModelProviderOptionsByName = {
// Text-only chat models with no extra capabilities
[CODEGEMMA_LATEST.name]: OllamaChatRequest & OllamaChatRequestMessages
[CODEGEMMA_2b.name]: OllamaChatRequest & OllamaChatRequestMessages
[CODEGEMMA_7b.name]: OllamaChatRequest & OllamaChatRequestMessages
}

export type CodegemmaModelInputModalitiesByName = {
// Text-only input
[CODEGEMMA_LATEST.name]: typeof CODEGEMMA_LATEST.supports.input
[CODEGEMMA_2b.name]: typeof CODEGEMMA_2b.supports.input
[CODEGEMMA_7b.name]: typeof CODEGEMMA_7b.supports.input
}