Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ MaxKB = Max Knowledge Brain, it is an open-source platform for building enterpri
- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting and vectorization. This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
- **Agentic Workflow**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
- **Seamless Integration**: Facilitates zero-coding rapid integration into third-party business systems, quickly equipping existing systems with intelligent Q&A capabilities to enhance user satisfaction.
- **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, etc.).
- **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, MiniMax, etc.).
- **Multi Modal**: Native support for input and output text, image, audio and video.

## Quick start
Expand Down
2 changes: 1 addition & 1 deletion README_CN.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ MaxKB = Max Knowledge Brain,是一个强大易用的企业级智能体平台
- **RAG 检索增强生成**:高效搭建本地 AI 知识库,支持直接上传文档 / 自动爬取在线文档,支持文本自动拆分、向量化,有效减少大模型幻觉,提升问答效果;
- **灵活编排**:内置强大的工作流引擎、函数库和 MCP 工具调用能力,支持编排 AI 工作过程,满足复杂业务场景下的需求;
- **无缝嵌入**:支持零编码快速嵌入到第三方业务系统,让已有系统快速拥有智能问答能力,提高用户满意度;
- **模型中立**:支持对接各种大模型,包括本地私有大模型(DeepSeek R1 / Qwen 3 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi 等)和国外公共大模型(OpenAI / Claude / Gemini 等)。
- **模型中立**:支持对接各种大模型,包括本地私有大模型(DeepSeek R1 / Qwen 3 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi / MiniMax 等)和国外公共大模型(OpenAI / Claude / Gemini 等)。

MaxKB 三分钟视频介绍:https://www.bilibili.com/video/BV18JypYeEkj/

Expand Down
2 changes: 2 additions & 0 deletions apps/models_provider/constants/model_provider_constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
from models_provider.impl.wenxin_model_provider.wenxin_model_provider import WenxinModelProvider
from models_provider.impl.xf_model_provider.xf_model_provider import XunFeiModelProvider
from models_provider.impl.xinference_model_provider.xinference_model_provider import XinferenceModelProvider
from models_provider.impl.minimax_model_provider.minimax_model_provider import MiniMaxModelProvider
from models_provider.impl.zhipu_model_provider.zhipu_model_provider import ZhiPuModelProvider


Expand All @@ -48,3 +49,4 @@ class ModelProvideConstants(Enum):
model_anthropic_provider = AnthropicModelProvider()
model_siliconCloud_provider = SiliconCloudModelProvider()
model_regolo_provider = RegoloModelProvider()
model_minimax_provider = MiniMaxModelProvider()
Empty file.
Empty file.
72 changes: 72 additions & 0 deletions apps/models_provider/impl/minimax_model_provider/credential/llm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# coding=utf-8
from typing import Dict

from django.utils.translation import gettext_lazy as _, gettext
from langchain_core.messages import HumanMessage

from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from models_provider.base_model_provider import BaseModelCredential, ValidCode
from common.utils.logger import maxkb_logger


class MiniMaxLLMModelParams(BaseForm):
    """Tunable generation parameters exposed in the UI for MiniMax LLM models."""

    # Sampling temperature slider; higher values produce more random output.
    temperature = forms.SliderField(
        TooltipLabel(
            _('Temperature'),
            _('Higher values make the output more random, while lower values make it more focused and deterministic')),
        required=True,
        default_value=1.0,
        _min=0.01,
        _max=1.0,
        _step=0.01,
        precision=2)

    # Upper bound on the number of tokens the model may generate per reply.
    max_tokens = forms.SliderField(
        TooltipLabel(
            _('Output the maximum Tokens'),
            _('Specify the maximum number of tokens that the model can generate')),
        required=True,
        default_value=800,
        _min=1,
        _max=192000,
        _step=1,
        precision=0)


class MiniMaxLLMModelCredential(BaseForm, BaseModelCredential):
    """Credential form and validation logic for MiniMax LLM models."""

    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
                 raise_exception=False):
        """Check the model type, required credential keys, and live connectivity.

        Returns True on success; returns False (or raises AppApiException when
        raise_exception is set) on any validation failure.
        """
        # The requested model type must be one the provider declares.
        supported = [mt for mt in provider.get_model_type_list() if mt.get('value') == model_type]
        if not supported:
            raise AppApiException(ValidCode.valid_error.value,
                                  gettext('{model_type} Model type is not supported').format(model_type=model_type))

        # Both credential fields must be present before attempting a call.
        missing = [key for key in ('api_key', 'api_base') if key not in model_credential]
        if missing:
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value,
                                      gettext('{key} is required').format(key=missing[0]))
            return False

        # Round-trip a trivial prompt to prove the credentials actually work.
        try:
            model = provider.get_model(model_type, model_name, model_credential, **model_params)
            model.invoke([HumanMessage(content=gettext('Hello'))])
        except Exception as e:
            maxkb_logger.error(f'Exception: {e}', exc_info=True)
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value,
                                      gettext(
                                          'Verification failed, please check whether the parameters are correct: {error}').format(
                                          error=str(e)))
            return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        """Return a copy of the credential dict with the API key encrypted."""
        encrypted_key = super().encryption(model.get('api_key', ''))
        return {**model, 'api_key': encrypted_key}

    api_base = forms.TextInputField('API URL', required=True,
                                    default_value='https://api.minimax.io/v1')
    api_key = forms.PasswordInputField('API Key', required=True)

    def get_model_params_setting_form(self, model_name):
        """Return the per-model generation-parameter form."""
        return MiniMaxLLMModelParams()
67 changes: 67 additions & 0 deletions apps/models_provider/impl/minimax_model_provider/credential/tts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
# coding=utf-8
from typing import Dict

from django.utils.translation import gettext_lazy as _, gettext

from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from models_provider.base_model_provider import BaseModelCredential, ValidCode
from common.utils.logger import maxkb_logger


class MiniMaxTTSModelGeneralParams(BaseForm):
    """User-facing parameters for MiniMax text-to-speech models."""

    # Available voice presets; option order is preserved for the UI.
    _VOICES = [
        'English_Graceful_Lady',
        'English_Insightful_Speaker',
        'English_radiant_girl',
        'English_Persuasive_Man',
        'English_Lucky_Robot',
    ]

    voice_id = forms.SingleSelect(
        TooltipLabel(_('Voice'),
                     _('Select a voice for speech synthesis')),
        required=True,
        default_value='English_Graceful_Lady',
        text_field='value',
        value_field='value',
        option_list=[{'text': v, 'value': v} for v in _VOICES])


class MiniMaxTTSModelCredential(BaseForm, BaseModelCredential):
    """Credential form and validation logic for MiniMax TTS models."""

    api_base = forms.TextInputField('API URL', required=True,
                                    default_value='https://api.minimax.io/v1')
    api_key = forms.PasswordInputField('API Key', required=True)

    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
                 raise_exception=False):
        """Check the model type, required credential keys, and live connectivity.

        Returns True on success; returns False (or raises AppApiException when
        raise_exception is set) on any validation failure.
        """
        # The requested model type must be one the provider declares.
        supported = [mt for mt in provider.get_model_type_list() if mt.get('value') == model_type]
        if not supported:
            raise AppApiException(ValidCode.valid_error.value,
                                  gettext('{model_type} Model type is not supported').format(model_type=model_type))

        # Both credential fields must be present before attempting a call.
        missing = [key for key in ('api_base', 'api_key') if key not in model_credential]
        if missing:
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value,
                                      gettext('{key} is required').format(key=missing[0]))
            return False

        # Run the model's own auth probe (performs a real synthesis call).
        try:
            model = provider.get_model(model_type, model_name, model_credential, **model_params)
            model.check_auth()
        except Exception as e:
            maxkb_logger.error(f'Exception: {e}', exc_info=True)
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value,
                                      gettext(
                                          'Verification failed, please check whether the parameters are correct: {error}').format(
                                          error=str(e)))
            return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        """Return a copy of the credential dict with the API key encrypted."""
        encrypted_key = super().encryption(model.get('api_key', ''))
        return {**model, 'api_key': encrypted_key}

    def get_model_params_setting_form(self, model_name):
        """Return the per-model TTS parameter form (voice selection)."""
        return MiniMaxTTSModelGeneralParams()
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none">
<rect width="24" height="24" rx="4" fill="#1A1A2E"/>
<text x="12" y="16" text-anchor="middle" font-family="Arial, sans-serif" font-weight="bold" font-size="10" fill="#E94560">M</text>
</svg>
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
# coding=utf-8
import os

from common.utils.common import get_file_content
from models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \
ModelInfoManage
from models_provider.impl.minimax_model_provider.credential.llm import MiniMaxLLMModelCredential
from models_provider.impl.minimax_model_provider.credential.tts import MiniMaxTTSModelCredential
from models_provider.impl.minimax_model_provider.model.llm import MiniMaxChatModel
from models_provider.impl.minimax_model_provider.model.tts import MiniMaxTextToSpeech
from maxkb.conf import PROJECT_DIR
from django.utils.translation import gettext_lazy as _

# Shared credential form instances, reused across all MiniMax model infos.
minimax_llm_model_credential = MiniMaxLLMModelCredential()
minimax_tts_model_credential = MiniMaxTTSModelCredential()

# LLM model catalogue entries.
minimax_m2_5 = ModelInfo(
    'MiniMax-M2.5',
    _('Peak Performance. Ultimate Value. 204K context window'),
    ModelTypeConst.LLM,
    minimax_llm_model_credential,
    MiniMaxChatModel)

minimax_m2_5_highspeed = ModelInfo(
    'MiniMax-M2.5-highspeed',
    _('Same performance, faster and more agile. 204K context window'),
    ModelTypeConst.LLM,
    minimax_llm_model_credential,
    MiniMaxChatModel)

# TTS model catalogue entries.
minimax_tts_hd = ModelInfo(
    'speech-2.8-hd',
    _('Perfecting tonal nuances with maximized timbre similarity'),
    ModelTypeConst.TTS,
    minimax_tts_model_credential,
    MiniMaxTextToSpeech)

minimax_tts_turbo = ModelInfo(
    'speech-2.8-turbo',
    _('Faster, more affordable TTS model'),
    ModelTypeConst.TTS,
    minimax_tts_model_credential,
    MiniMaxTextToSpeech)

# Assemble the registry step by step; call order matches the original chain
# (defaults registered after their corresponding model infos).
_builder = ModelInfoManage.builder()
_builder = _builder.append_model_info(minimax_m2_5)
_builder = _builder.append_model_info(minimax_m2_5_highspeed)
_builder = _builder.append_default_model_info(minimax_m2_5)
_builder = _builder.append_model_info(minimax_tts_hd)
_builder = _builder.append_model_info(minimax_tts_turbo)
_builder = _builder.append_default_model_info(minimax_tts_hd)
model_info_manage = _builder.build()


class MiniMaxModelProvider(IModelProvider):
    """Registers MiniMax models and provider metadata with MaxKB."""

    def get_model_info_manage(self):
        # Module-level registry shared by all instances of this provider.
        return model_info_manage

    def get_model_provide_info(self):
        """Return display metadata (key, name, inline SVG icon) for this provider."""
        # NOTE(review): the icon file name 'minimax_icon_svg' has no '.svg'
        # extension — confirm it matches the actual file on disk.
        icon_path = os.path.join(PROJECT_DIR, "apps", 'models_provider', 'impl',
                                 'minimax_model_provider', 'icon', 'minimax_icon_svg')
        return ModelProvideInfo(provider='model_minimax_provider',
                                name='MiniMax',
                                icon=get_file_content(icon_path))
Empty file.
23 changes: 23 additions & 0 deletions apps/models_provider/impl/minimax_model_provider/model/llm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# coding=utf-8
from typing import Dict

from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_chat_open_ai import BaseChatOpenAI


class MiniMaxChatModel(MaxKBBaseModel, BaseChatOpenAI):
    """MiniMax chat model accessed through its OpenAI-compatible endpoint."""

    @staticmethod
    def is_cache_model():
        # Always False: a fresh instance is built per request — presumably to
        # keep MaxKB's model cache from reusing instances; confirm against
        # MaxKBBaseModel's caching contract.
        return False

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        """Build a chat model instance from stored credentials and optional params."""
        # Strip MaxKB-internal kwargs; the rest are forwarded in the request body.
        extra = MaxKBBaseModel.filter_optional_params(model_kwargs)
        base_url = model_credential.get('api_base') or 'https://api.minimax.io/v1'
        return MiniMaxChatModel(
            model=model_name,
            openai_api_base=base_url,
            openai_api_key=model_credential.get('api_key'),
            extra_body=extra,
        )
82 changes: 82 additions & 0 deletions apps/models_provider/impl/minimax_model_provider/model/tts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
# coding=utf-8
from typing import Dict

import requests

from django.utils.translation import gettext as _

from common.utils.common import _remove_empty_lines
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_tts import BaseTextToSpeech


class MiniMaxTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
    """Text-to-speech client for the MiniMax `t2a_v2` HTTP endpoint.

    Synthesizes MP3 audio from text via a single non-streaming POST request.
    """
    # NOTE(review): whether these class-level annotations are plain hints or
    # fields consumed by BaseTextToSpeech (e.g. pydantic) depends on the base
    # class, which is not visible here — confirm before restructuring.
    api_base: str
    api_key: str
    model: str
    params: dict

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Pull our own settings back out of kwargs after the base init;
        # any of these may be None if the caller omitted the key.
        self.api_key = kwargs.get('api_key')
        self.api_base = kwargs.get('api_base')
        self.model = kwargs.get('model')
        self.params = kwargs.get('params')

    @staticmethod
    def is_cache_model():
        # Always False: a new instance is created per use (see new_instance).
        return False

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        """Build a TTS instance from stored credentials and optional params.

        Starts from a default voice, then overlays every caller-supplied
        kwarg except MaxKB-internal keys into the request params.
        """
        optional_params = {'params': {'voice_id': 'English_Graceful_Lady'}}
        for key, value in model_kwargs.items():
            if key not in ['model_id', 'use_local', 'streaming']:
                optional_params['params'][key] = value
        return MiniMaxTextToSpeech(
            model=model_name,
            api_base=model_credential.get('api_base') or 'https://api.minimax.io/v1',
            api_key=model_credential.get('api_key'),
            **optional_params,
        )

    def check_auth(self):
        # Credential probe: perform a real (billable) synthesis of a short
        # greeting; any API/auth failure propagates as an exception.
        self.text_to_speech(_('Hello'))

    def text_to_speech(self, text):
        """Synthesize *text* to MP3 and return the raw audio bytes.

        Raises on HTTP errors, on a non-zero MiniMax `base_resp.status_code`,
        and on an empty audio payload.
        """
        text = _remove_empty_lines(text)
        # Normalize the base URL so f'{api_base}/t2a_v2' never doubles the slash.
        api_base = self.api_base.rstrip('/')
        url = f'{api_base}/t2a_v2'

        voice_id = self.params.get('voice_id', 'English_Graceful_Lady')

        payload = {
            'model': self.model,
            'text': text,
            'stream': False,
            'voice_setting': {
                'voice_id': voice_id,
            },
            'audio_setting': {
                'format': 'mp3',
            },
        }

        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json',
        }

        response = requests.post(url, json=payload, headers=headers, timeout=60)
        response.raise_for_status()

        # MiniMax signals application-level errors inside the JSON body
        # even when the HTTP status is 200, via base_resp.status_code != 0.
        result = response.json()
        if result.get('base_resp', {}).get('status_code', 0) != 0:
            error_msg = result.get('base_resp', {}).get('status_msg', 'Unknown error')
            raise Exception(f'MiniMax TTS API error: {error_msg}')

        # The audio payload is hex-encoded in data.audio — decode to bytes.
        audio_hex = result.get('data', {}).get('audio', '')
        if not audio_hex:
            raise Exception('MiniMax TTS API returned empty audio data')

        return bytes.fromhex(audio_hex)