Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 40 additions & 0 deletions core/llm/llms/MiniMax.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import { ChatMessage, CompletionOptions, LLMOptions } from "../../index.js";

import OpenAI from "./OpenAI.js";

/**
 * MiniMax provider, speaking the OpenAI-compatible chat API at
 * api.minimax.io. Inherits all transport behavior from OpenAI and only
 * rewrites request arguments that MiniMax handles differently.
 */
class MiniMax extends OpenAI {
  static providerName = "minimax";
  static defaultOptions: Partial<LLMOptions> = {
    apiBase: "https://api.minimax.io/v1/",
    model: "MiniMax-M2.5",
    useLegacyCompletionsEndpoint: false,
  };

  /**
   * Converts Continue completion options into MiniMax request arguments.
   * Starts from the OpenAI mapping, then applies two MiniMax-specific
   * adjustments: temperature clamping and response_format removal.
   */
  protected _convertArgs(
    options: CompletionOptions,
    messages: ChatMessage[],
  ) {
    const args = super._convertArgs(options, messages);

    // MiniMax only accepts temperature values in (0.0, 1.0]; zero (or any
    // non-positive value) is rejected outright, so clamp into range rather
    // than letting the API error out.
    const temp = args.temperature;
    if (temp != null) {
      args.temperature = temp <= 0 ? 0.01 : Math.min(temp, 1.0);
    }

    // MiniMax does not support response_format; strip it if present.
    const mutableArgs = args as any;
    if (mutableArgs.response_format) {
      delete mutableArgs.response_format;
    }

    return args;
  }
}

export default MiniMax;
2 changes: 2 additions & 0 deletions core/llm/llms/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ import Lemonade from "./Lemonade";
import LMStudio from "./LMStudio";
import Mistral from "./Mistral";
import Mimo from "./Mimo";
import MiniMax from "./MiniMax";
import MockLLM from "./Mock";
import Moonshot from "./Moonshot";
import Msty from "./Msty";
Expand Down Expand Up @@ -92,6 +93,7 @@ export const LLMClasses = [
LMStudio,
Mistral,
Mimo,
MiniMax,
Bedrock,
BedrockImport,
SageMaker,
Expand Down
53 changes: 53 additions & 0 deletions docs/customize/model-providers/more/minimax.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
---
title: "How to Configure MiniMax with Continue"
sidebarTitle: "MiniMax"
---

<Info>
Get your API key from the [MiniMax Platform](https://platform.minimax.io)
</Info>

## Configuration

<Tabs>
<Tab title="YAML">
```yaml title="config.yaml"
name: My Config
version: 0.0.1
schema: v1

models:
- name: MiniMax M2.5
provider: minimax
model: MiniMax-M2.5
apiKey: <YOUR_MINIMAX_API_KEY>
```
</Tab>
<Tab title="JSON (Deprecated)">
```json title="config.json"
{
"models": [
{
"title": "MiniMax M2.5",
"provider": "minimax",
"model": "MiniMax-M2.5",
"apiKey": "<YOUR_MINIMAX_API_KEY>"
}
]
}
```
</Tab>
</Tabs>

## Available Models

| Model | Description |
| :---- | :---------- |
| `MiniMax-M2.5` | Peak performance with ultimate value. 204K context window. |
| `MiniMax-M2.5-highspeed` | Same performance, faster and more agile. 204K context window. |

## Notes

- MiniMax uses an OpenAI-compatible API at `https://api.minimax.io/v1`
- Set the `MINIMAX_API_KEY` environment variable or configure `apiKey` in your config
- For users in China, set `apiBase` to `https://api.minimaxi.com/v1/`
1 change: 1 addition & 0 deletions docs/customize/model-providers/overview.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ Beyond the top-level providers, Continue supports many other options:
| [Cohere](/customize/model-providers/more/cohere) | Models specialized for semantic search and text generation |
| [NVIDIA](/customize/model-providers/more/nvidia) | GPU-accelerated model hosting |
| [Cloudflare](/customize/model-providers/more/cloudflare) | Edge-based AI inference services |
| [MiniMax](/customize/model-providers/more/minimax) | High-performance models with 200K+ context window |

### Local Model Options

Expand Down
24 changes: 24 additions & 0 deletions gui/src/pages/AddNewModel/configs/models.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2688,6 +2688,30 @@ export const models: { [key: string]: ModelPackage } = {
providerOptions: ["sambanova"],
isOpenSource: true,
},
minimaxM25: {
title: "MiniMax M2.5",
description:
"Peak performance with ultimate value. Excels at complex reasoning, code generation, and multi-step tasks with a 204K context window.",
params: {
title: "MiniMax M2.5",
model: "MiniMax-M2.5",
contextLength: 204_800,
},
providerOptions: ["minimax"],
isOpenSource: false,
},
minimaxM25Highspeed: {
title: "MiniMax M2.5 Highspeed",
description:
"Same performance as M2.5, faster and more agile for latency-sensitive tasks with a 204K context window.",
params: {
title: "MiniMax M2.5 Highspeed",
model: "MiniMax-M2.5-highspeed",
contextLength: 204_800,
},
providerOptions: ["minimax"],
isOpenSource: false,
},
AUTODETECT: {
title: "Autodetect",
description:
Expand Down
30 changes: 30 additions & 0 deletions gui/src/pages/AddNewModel/configs/providers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -579,6 +579,36 @@ Select the \`GPT-4o\` model below to complete your provider configuration, but n
],
apiKeyUrl: "https://console.groq.com/keys",
},
minimax: {
title: "MiniMax",
provider: "minimax",
description:
"MiniMax offers high-performance models with 200K+ context windows at competitive pricing.",
longDescription:
"To get started with MiniMax, obtain an API key from the [MiniMax Platform](https://platform.minimax.io).",
tags: [ModelProviderTags.RequiresApiKey],
collectInputFor: [
{
inputType: "text",
key: "apiKey",
label: "API Key",
placeholder: "Enter your MiniMax API key",
required: true,
},
],
packages: [
models.minimaxM25,
models.minimaxM25Highspeed,
{
...models.AUTODETECT,
params: {
...models.AUTODETECT.params,
title: "MiniMax",
},
},
],
apiKeyUrl: "https://platform.minimax.io",
},
deepseek: {
title: "DeepSeek",
provider: "deepseek",
Expand Down
1 change: 1 addition & 0 deletions packages/config-types/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ export const modelDescriptionSchema = z.object({
"nebius",
"scaleway",
"watsonx",
"minimax",
]),
model: z.string(),
apiKey: z.string().optional(),
Expand Down
2 changes: 2 additions & 0 deletions packages/llm-info/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import { Bedrock } from "./providers/bedrock.js";
import { Cohere } from "./providers/cohere.js";
import { CometAPI } from "./providers/cometapi.js";
import { Gemini } from "./providers/gemini.js";
import { MiniMax } from "./providers/minimax.js";
import { Mistral } from "./providers/mistral.js";
import { Ollama } from "./providers/ollama.js";
import { OpenAi } from "./providers/openai.js";
Expand All @@ -25,6 +26,7 @@ export const allModelProviders: ModelProvider[] = [
Bedrock,
Cohere,
CometAPI,
MiniMax,
xAI,
zAI,
];
Expand Down
28 changes: 28 additions & 0 deletions packages/llm-info/src/providers/minimax.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import { ModelProvider } from "../types.js";

/**
 * llm-info catalog entry for MiniMax.
 * Both models expose a 204,800-token context window and up to 192,000
 * completion tokens.
 */
export const MiniMax: ModelProvider = {
  id: "minimax",
  displayName: "MiniMax",
  models: [
    {
      model: "MiniMax-M2.5",
      displayName: "MiniMax M2.5",
      contextLength: 204800,
      maxCompletionTokens: 192000,
      description:
        "Peak performance with ultimate value. Excels at complex reasoning, code generation, and multi-step tasks.",
      // End-anchored so the base model id does not also match the
      // "-highspeed" variant below.
      regex: /MiniMax-M2\.5$/i,
      recommendedFor: ["chat"],
    },
    {
      model: "MiniMax-M2.5-highspeed",
      displayName: "MiniMax M2.5 Highspeed",
      contextLength: 204800,
      maxCompletionTokens: 192000,
      description:
        "Same performance as M2.5, faster and more agile for latency-sensitive tasks.",
      regex: /MiniMax-M2\.5-highspeed/i,
      recommendedFor: ["chat"],
    },
  ],
};
2 changes: 2 additions & 0 deletions packages/openai-adapters/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined {
return openAICompatible("http://localhost:8000/v1/", config);
case "groq":
return openAICompatible("https://api.groq.com/openai/v1/", config);
case "minimax":
return openAICompatible("https://api.minimax.io/v1/", config);
Copy link
Contributor

@cubic-dev-ai cubic-dev-ai bot Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1: MiniMax is wired to generic OpenAIApi, which skips the repo’s MiniMax-specific request fixes (temperature clamping and response_format removal), creating a real incompatibility path in adapter-based runtime flows.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At packages/openai-adapters/src/index.ts, line 145:

<comment>MiniMax is wired to generic `OpenAIApi`, which skips the repo’s MiniMax-specific request fixes (temperature clamping and `response_format` removal), creating a real incompatibility path in adapter-based runtime flows.</comment>

<file context>
@@ -141,6 +141,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined {
     case "groq":
       return openAICompatible("https://api.groq.com/openai/v1/", config);
+    case "minimax":
+      return openAICompatible("https://api.minimax.io/v1/", config);
     case "sambanova":
       return openAICompatible("https://api.sambanova.ai/v1/", config);
</file context>
Fix with Cubic

case "sambanova":
return openAICompatible("https://api.sambanova.ai/v1/", config);
case "text-gen-webui":
Expand Down
Loading