diff --git a/core/llm/llms/MiniMax.ts b/core/llm/llms/MiniMax.ts
new file mode 100644
index 00000000000..c96200a53ca
--- /dev/null
+++ b/core/llm/llms/MiniMax.ts
@@ -0,0 +1,40 @@
+import { ChatMessage, CompletionOptions, LLMOptions } from "../../index.js";
+
+import OpenAI from "./OpenAI.js";
+
+class MiniMax extends OpenAI {
+  static providerName = "minimax";
+  static defaultOptions: Partial<LLMOptions> = {
+    apiBase: "https://api.minimax.io/v1/",
+    model: "MiniMax-M2.5",
+    useLegacyCompletionsEndpoint: false,
+  };
+
+  protected _convertArgs(
+    options: CompletionOptions,
+    messages: ChatMessage[],
+  ) {
+    const finalOptions = super._convertArgs(options, messages);
+
+    // MiniMax requires temperature in (0.0, 1.0] — zero is rejected
+    if (
+      finalOptions.temperature !== undefined &&
+      finalOptions.temperature !== null
+    ) {
+      if (finalOptions.temperature <= 0) {
+        finalOptions.temperature = 0.01;
+      } else if (finalOptions.temperature > 1) {
+        finalOptions.temperature = 1.0;
+      }
+    }
+
+    // MiniMax does not support response_format
+    if ((finalOptions as any).response_format) {
+      delete (finalOptions as any).response_format;
+    }
+
+    return finalOptions;
+  }
+}
+
+export default MiniMax;
diff --git a/core/llm/llms/index.ts b/core/llm/llms/index.ts
index 04f58e393de..a0711f1fbdd 100644
--- a/core/llm/llms/index.ts
+++ b/core/llm/llms/index.ts
@@ -38,6 +38,7 @@ import Lemonade from "./Lemonade";
 import LMStudio from "./LMStudio";
 import Mistral from "./Mistral";
 import Mimo from "./Mimo";
+import MiniMax from "./MiniMax";
 import MockLLM from "./Mock";
 import Moonshot from "./Moonshot";
 import Msty from "./Msty";
@@ -92,6 +93,7 @@ export const LLMClasses = [
   LMStudio,
   Mistral,
   Mimo,
+  MiniMax,
   Bedrock,
   BedrockImport,
   SageMaker,
diff --git a/docs/customize/model-providers/more/minimax.mdx b/docs/customize/model-providers/more/minimax.mdx
new file mode 100644
index 00000000000..4b47dd96585
--- /dev/null
+++ b/docs/customize/model-providers/more/minimax.mdx
@@ -0,0 +1,53 @@
+---
+title: "How to Configure MiniMax with Continue"
+sidebarTitle: "MiniMax"
+---
+
+<Info>
+  Get your API key from the [MiniMax Platform](https://platform.minimax.io)
+</Info>
+
+## Configuration
+
+<Tabs>
+  <Tab title="YAML">
+  ```yaml title="config.yaml"
+  name: My Config
+  version: 0.0.1
+  schema: v1
+
+  models:
+    - name: MiniMax M2.5
+      provider: minimax
+      model: MiniMax-M2.5
+      apiKey: <YOUR_MINIMAX_API_KEY>
+  ```
+  </Tab>
+  <Tab title="JSON">
+  ```json title="config.json"
+  {
+    "models": [
+      {
+        "title": "MiniMax M2.5",
+        "provider": "minimax",
+        "model": "MiniMax-M2.5",
+        "apiKey": "<YOUR_MINIMAX_API_KEY>"
+      }
+    ]
+  }
+  ```
+  </Tab>
+</Tabs>
+
+## Available Models
+
+| Model | Description |
+| :---- | :---------- |
+| `MiniMax-M2.5` | Peak performance with ultimate value. 204K context window. |
+| `MiniMax-M2.5-highspeed` | Same performance, faster and more agile. 204K context window. |
+
+## Notes
+
+- MiniMax uses an OpenAI-compatible API at `https://api.minimax.io/v1`
+- Set the `MINIMAX_API_KEY` environment variable or configure `apiKey` in your config
+- For users in China, set `apiBase` to `https://api.minimaxi.com/v1/`
diff --git a/docs/customize/model-providers/overview.mdx b/docs/customize/model-providers/overview.mdx
index 4bbf1c0dc60..fcd99be669a 100644
--- a/docs/customize/model-providers/overview.mdx
+++ b/docs/customize/model-providers/overview.mdx
@@ -38,6 +38,7 @@ Beyond the top-level providers, Continue supports many other options:
 | [Cohere](/customize/model-providers/more/cohere) | Models specialized for semantic search and text generation |
 | [NVIDIA](/customize/model-providers/more/nvidia) | GPU-accelerated model hosting |
 | [Cloudflare](/customize/model-providers/more/cloudflare) | Edge-based AI inference services |
+| [MiniMax](/customize/model-providers/more/minimax) | High-performance models with 200K+ context window |
 
 ### Local Model Options
 
diff --git a/gui/src/pages/AddNewModel/configs/models.ts b/gui/src/pages/AddNewModel/configs/models.ts
index d15db3f3d1d..52028bd965a 100644
--- a/gui/src/pages/AddNewModel/configs/models.ts
+++ b/gui/src/pages/AddNewModel/configs/models.ts
@@ -2688,6 +2688,30 @@ export const models: { [key: string]: ModelPackage } = {
     providerOptions: ["sambanova"],
     isOpenSource: true,
   },
+  minimaxM25: {
+    title: "MiniMax M2.5",
+    description:
+      "Peak performance with ultimate value. Excels at complex reasoning, code generation, and multi-step tasks with a 204K context window.",
+    params: {
+      title: "MiniMax M2.5",
+      model: "MiniMax-M2.5",
+      contextLength: 204_800,
+    },
+    providerOptions: ["minimax"],
+    isOpenSource: false,
+  },
+  minimaxM25Highspeed: {
+    title: "MiniMax M2.5 Highspeed",
+    description:
+      "Same performance as M2.5, faster and more agile for latency-sensitive tasks with a 204K context window.",
+    params: {
+      title: "MiniMax M2.5 Highspeed",
+      model: "MiniMax-M2.5-highspeed",
+      contextLength: 204_800,
+    },
+    providerOptions: ["minimax"],
+    isOpenSource: false,
+  },
   AUTODETECT: {
     title: "Autodetect",
     description:
diff --git a/gui/src/pages/AddNewModel/configs/providers.ts b/gui/src/pages/AddNewModel/configs/providers.ts
index 5dfb7220b14..348052e7dc0 100644
--- a/gui/src/pages/AddNewModel/configs/providers.ts
+++ b/gui/src/pages/AddNewModel/configs/providers.ts
@@ -579,6 +579,36 @@ Select the \`GPT-4o\` model below to complete your provider configuration, but n
     ],
     apiKeyUrl: "https://console.groq.com/keys",
   },
+  minimax: {
+    title: "MiniMax",
+    provider: "minimax",
+    description:
+      "MiniMax offers high-performance models with 200K+ context windows at competitive pricing.",
+    longDescription:
+      "To get started with MiniMax, obtain an API key from the [MiniMax Platform](https://platform.minimax.io).",
+    tags: [ModelProviderTags.RequiresApiKey],
+    collectInputFor: [
+      {
+        inputType: "text",
+        key: "apiKey",
+        label: "API Key",
+        placeholder: "Enter your MiniMax API key",
+        required: true,
+      },
+    ],
+    packages: [
+      models.minimaxM25,
+      models.minimaxM25Highspeed,
+      {
+        ...models.AUTODETECT,
+        params: {
+          ...models.AUTODETECT.params,
+          title: "MiniMax",
+        },
+      },
+    ],
+    apiKeyUrl: "https://platform.minimax.io",
+  },
   deepseek: {
     title: "DeepSeek",
     provider: "deepseek",
diff --git a/packages/config-types/src/index.ts b/packages/config-types/src/index.ts
index a22bb00bb0b..8561500e662 100644
--- a/packages/config-types/src/index.ts
+++ b/packages/config-types/src/index.ts
@@ -61,6 +61,7 @@ export const modelDescriptionSchema = z.object({
     "nebius",
     "scaleway",
     "watsonx",
+    "minimax",
   ]),
   model: z.string(),
   apiKey: z.string().optional(),
diff --git a/packages/llm-info/src/index.ts b/packages/llm-info/src/index.ts
index 52ca9f211cb..066b39c0ce2 100644
--- a/packages/llm-info/src/index.ts
+++ b/packages/llm-info/src/index.ts
@@ -4,6 +4,7 @@ import { Bedrock } from "./providers/bedrock.js";
 import { Cohere } from "./providers/cohere.js";
 import { CometAPI } from "./providers/cometapi.js";
 import { Gemini } from "./providers/gemini.js";
+import { MiniMax } from "./providers/minimax.js";
 import { Mistral } from "./providers/mistral.js";
 import { Ollama } from "./providers/ollama.js";
 import { OpenAi } from "./providers/openai.js";
@@ -25,6 +26,7 @@ export const allModelProviders: ModelProvider[] = [
   Bedrock,
   Cohere,
   CometAPI,
+  MiniMax,
   xAI,
   zAI,
 ];
diff --git a/packages/llm-info/src/providers/minimax.ts b/packages/llm-info/src/providers/minimax.ts
new file mode 100644
index 00000000000..ee157e8a86e
--- /dev/null
+++ b/packages/llm-info/src/providers/minimax.ts
@@ -0,0 +1,28 @@
+import { ModelProvider } from "../types.js";
+
+export const MiniMax: ModelProvider = {
+  models: [
+    {
+      model: "MiniMax-M2.5",
+      displayName: "MiniMax M2.5",
+      contextLength: 204800,
+      maxCompletionTokens: 192000,
+      description:
+        "Peak performance with ultimate value. Excels at complex reasoning, code generation, and multi-step tasks.",
+      regex: /MiniMax-M2\.5$/i,
+      recommendedFor: ["chat"],
+    },
+    {
+      model: "MiniMax-M2.5-highspeed",
+      displayName: "MiniMax M2.5 Highspeed",
+      contextLength: 204800,
+      maxCompletionTokens: 192000,
+      description:
+        "Same performance as M2.5, faster and more agile for latency-sensitive tasks.",
+      regex: /MiniMax-M2\.5-highspeed/i,
+      recommendedFor: ["chat"],
+    },
+  ],
+  id: "minimax",
+  displayName: "MiniMax",
+};
diff --git a/packages/openai-adapters/src/index.ts b/packages/openai-adapters/src/index.ts
index a00b95b81f7..50c937946b5 100644
--- a/packages/openai-adapters/src/index.ts
+++ b/packages/openai-adapters/src/index.ts
@@ -141,6 +141,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined {
       return openAICompatible("http://localhost:8000/v1/", config);
     case "groq":
       return openAICompatible("https://api.groq.com/openai/v1/", config);
+    case "minimax":
+      return openAICompatible("https://api.minimax.io/v1/", config);
     case "sambanova":
       return openAICompatible("https://api.sambanova.ai/v1/", config);
     case "text-gen-webui":