Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
262 changes: 44 additions & 218 deletions docker/.env.example
Original file line number Diff line number Diff line change
@@ -1,223 +1,49 @@
# MemOS Environment Variables (core runtime)
# Legend: [required] needed for default startup; others are optional or conditional per comments.

## Base
TZ=Asia/Shanghai
MOS_CUBE_PATH=/tmp/data_test # local data path
MEMOS_BASE_PATH=. # CLI/SDK cache path
MOS_ENABLE_DEFAULT_CUBE_CONFIG=true # enable default cube config
MOS_ENABLE_REORGANIZE=false # enable memory reorg
# MOS Text Memory Type
MOS_TEXT_MEM_TYPE=general_text # general_text | tree_text
ASYNC_MODE=sync # async/sync, used in default cube config

## User/session defaults
# Top-K for LLM in the Product API(old version)
MOS_TOP_K=50

## Chat LLM (main dialogue)
# LLM model name for the Product API
MOS_CHAT_MODEL=gpt-4o-mini
# Temperature for LLM in the Product API
MOS_CHAT_TEMPERATURE=0.8
# Max tokens for LLM in the Product API
MOS_MAX_TOKENS=2048
# Top-P for LLM in the Product API
MOS_TOP_P=0.9
# LLM for the Product API backend
MOS_CHAT_MODEL_PROVIDER=openai # openai | huggingface | vllm | minimax
OPENAI_API_KEY=sk-xxx # [required] when provider=openai
OPENAI_API_BASE=https://api.openai.com/v1 # [required] base for the key
# MiniMax LLM (when provider=minimax)
# MINIMAX_API_KEY=your-minimax-api-key # [required] when provider=minimax
# MINIMAX_API_BASE=https://api.minimax.io/v1 # base for MiniMax API

## MemReader / retrieval LLM
MEMRADER_MODEL=gpt-4o-mini
MEMRADER_API_KEY=sk-xxx # [required] can reuse OPENAI_API_KEY
MEMRADER_API_BASE=http://localhost:3000/v1 # [required] base for the key
MEMRADER_MAX_TOKENS=5000

## Embedding & rerank
# embedding dim
# Apply through Alibaba Cloud Bailian platform
# https://bailian.console.aliyun.com/?spm=a2c4g.11186623.0.0.2f2165b08fRk4l&tab=api#/api
# After successful application, obtain API_KEY and BASE_URL, example configuration below

# OpenAI API Key (use Bailian's API_KEY)
OPENAI_API_KEY=your_bailian_api_key
# OpenAI API Base URL
OPENAI_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
# Default model name
MOS_CHAT_MODEL=qwen3-max

# Memory Reader LLM Model
MEMRADER_MODEL=qwen3-max
# Memory Reader API Key (use Bailian's API_KEY)
MEMRADER_API_KEY=your_bailian_api_key
# Memory Reader API Base URL
MEMRADER_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1

# For Embedder model names, refer to the link below
# https://bailian.console.aliyun.com/?spm=a2c4g.11186623.0.0.2f2165b08fRk4l&tab=api#/api/?type=model&url=2846066
MOS_EMBEDDER_MODEL=text-embedding-v4
# Configure embedding backend, two options: ollama | universal_api
MOS_EMBEDDER_BACKEND=universal_api
# Embedder API Base URL
MOS_EMBEDDER_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
# Embedder API Key (use Bailian's API_KEY)
MOS_EMBEDDER_API_KEY=your_bailian_api_key
# Embedding vector dimension
EMBEDDING_DIMENSION=1024
# set default embedding backend
MOS_EMBEDDER_BACKEND=universal_api # universal_api | ollama
# set openai style
MOS_EMBEDDER_PROVIDER=openai # required when universal_api
# embedding model
MOS_EMBEDDER_MODEL=bge-m3 # siliconflow → use BAAI/bge-m3
# embedding url
MOS_EMBEDDER_API_BASE=http://localhost:8000/v1 # required when universal_api
# embedding model key
MOS_EMBEDDER_API_KEY=EMPTY # required when universal_api
OLLAMA_API_BASE=http://localhost:11434 # required when backend=ollama
# reranker config
MOS_RERANKER_BACKEND=http_bge # http_bge | http_bge_strategy | cosine_local
# reranker url
MOS_RERANKER_URL=http://localhost:8001 # required when backend=http_bge*
# reranker model
MOS_RERANKER_MODEL=bge-reranker-v2-m3 # siliconflow → use BAAI/bge-reranker-v2-m3
MOS_RERANKER_HEADERS_EXTRA= # extra headers, JSON string, e.g. {"Authorization":"Bearer your_token"}
# use source
MOS_RERANKER_STRATEGY=single_turn


# External Services (for evaluation scripts)
# API key for reproducing Zep (competitor product) evaluation
ZEP_API_KEY=your_zep_api_key_here
# API key for reproducing Mem0 (competitor product) evaluation
MEM0_API_KEY=your_mem0_api_key_here
# API key for reproducing MemU (competitor product) evaluation
MEMU_API_KEY=your_memu_api_key_here
# API key for reproducing MEMOBASE (competitor product) evaluation
MEMOBASE_API_KEY=your_memobase_api_key_here
# Project URL for reproducing MEMOBASE (competitor product) evaluation
MEMOBASE_PROJECT_URL=your_memobase_project_url_here
# LLM for evaluation
MODEL=gpt-4o-mini
# embedding model for evaluation
EMBEDDING_MODEL=nomic-embed-text:latest


## Internet search & preference memory
# Enable web search
ENABLE_INTERNET=false
# Internet search backend (bocha | tavily)
INTERNET_SEARCH_BACKEND=bocha
# API key for BOCHA Search
BOCHA_API_KEY= # required if ENABLE_INTERNET=true and backend=bocha
# API key for Tavily Search
TAVILY_API_KEY= # required if ENABLE_INTERNET=true and backend=tavily
# default search mode
SEARCH_MODE=fast # fast | fine | mixture
# Slow retrieval strategy configuration, rewrite is the rewrite strategy
FINE_STRATEGY=rewrite # rewrite | recreate | deep_search
# Whether to enable preference memory
ENABLE_PREFERENCE_MEMORY=true
# Preference Memory Add Mode
PREFERENCE_ADDER_MODE=fast # fast | safe
# Whether to deduplicate explicit preferences based on factual memory
DEDUP_PREF_EXP_BY_TEXTUAL=false

## Reader chunking
MEM_READER_BACKEND=simple_struct # simple_struct | strategy_struct
MEM_READER_CHAT_CHUNK_TYPE=default # default | content_length
MEM_READER_CHAT_CHUNK_TOKEN_SIZE=1600 # tokens per chunk (default mode)
MEM_READER_CHAT_CHUNK_SESS_SIZE=10 # sessions per chunk (default mode)
MEM_READER_CHAT_CHUNK_OVERLAP=2 # overlap between chunks

## Scheduler (MemScheduler / API)
# Enable or disable the main switch for configuring the memory scheduler during MemOS class initialization
MOS_ENABLE_SCHEDULER=false
# Determine the number of most relevant memory entries that the scheduler retrieves or processes during runtime (such as reordering or updating working memory)
MOS_SCHEDULER_TOP_K=10
# The time interval (in seconds) for updating "Activation Memory" (usually referring to caching or short-term memory mechanisms)
MOS_SCHEDULER_ACT_MEM_UPDATE_INTERVAL=300
# The size of the context window considered by the scheduler when processing tasks (such as the number of recent messages or conversation rounds)
MOS_SCHEDULER_CONTEXT_WINDOW_SIZE=5
# The maximum number of working threads allowed in the scheduler thread pool for concurrent task execution
MOS_SCHEDULER_THREAD_POOL_MAX_WORKERS=10000
# The polling interval (in seconds) for the scheduler to consume new messages/tasks from the queue. The smaller the value, the faster the response, but the CPU usage may be higher
MOS_SCHEDULER_CONSUME_INTERVAL_SECONDS=0.01
# Whether to enable the parallel distribution function of the scheduler to improve the throughput of concurrent operations
MOS_SCHEDULER_ENABLE_PARALLEL_DISPATCH=true
# The specific switch to enable or disable the "Activate Memory" function in the scheduler logic
MOS_SCHEDULER_ENABLE_ACTIVATION_MEMORY=false
# Control whether the scheduler instance is actually started during server initialization. If false, the scheduler object may be created but its background loop will not be started
API_SCHEDULER_ON=true
# Specifically define the window size for API search operations in OptimizedScheduler. It is passed to the SchedulerAPIModule to control the scope of the search context
API_SEARCH_WINDOW_SIZE=5
# Specify how many rounds of previous conversations (history) to retrieve and consider during the 'hybrid search' (fast search+asynchronous fine search). This helps provide context aware search results
API_SEARCH_HISTORY_TURNS=5
MEMSCHEDULER_USE_REDIS_QUEUE=false

## Graph / vector stores
# Neo4j database selection mode
NEO4J_BACKEND=neo4j-community # neo4j-community | neo4j | polardb | postgres
# Neo4j database url
NEO4J_URI=bolt://localhost:7687 # required when backend=neo4j*
# Neo4j database user
NEO4J_USER=neo4j # required when backend=neo4j*
# Neo4j database password
NEO4J_PASSWORD=12345678 # required when backend=neo4j*
# Neo4j database name
NEO4J_DB_NAME=neo4j # required for shared-db mode
# Neo4j database data sharing with Memos
# Reranker Backend (http_bge | etc.)
MOS_RERANKER_BACKEND=cosine_local

# Neo4j Connection URI
# Available options: neo4j-community | neo4j | nebula | polardb
NEO4J_BACKEND=neo4j-community
# Required when backend=neo4j*
NEO4J_URI=bolt://localhost:7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=12345678
NEO4J_DB_NAME=neo4j
MOS_NEO4J_SHARED_DB=false
QDRANT_HOST=localhost
QDRANT_PORT=6333
# For Qdrant Cloud / remote endpoint (takes priority if set):
QDRANT_URL=your_qdrant_url
QDRANT_API_KEY=your_qdrant_key
# milvus server uri
MILVUS_URI=http://localhost:19530 # required when ENABLE_PREFERENCE_MEMORY=true
MILVUS_USER_NAME=root # same as above
MILVUS_PASSWORD=12345678 # same as above

# PolarDB endpoint/host
POLAR_DB_HOST=localhost
# PolarDB port
POLAR_DB_PORT=5432
# PolarDB username
POLAR_DB_USER=root
# PolarDB password
POLAR_DB_PASSWORD=123456
# PolarDB database name
POLAR_DB_DB_NAME=shared_memos_db
# PolarDB Server Mode:
# If set to true, use Multi-Database Mode where each user has their own independent database (physical isolation).
# If set to false (default), use Shared Database Mode where all users share one database with logical isolation via username.
POLAR_DB_USE_MULTI_DB=false
# PolarDB connection pool size
POLARDB_POOL_MAX_CONN=100

## Related configurations of Redis
# Redis MQ sends scheduling messages and synchronizes some variables
MEMSCHEDULER_REDIS_HOST= # fallback keys if not using the global ones
MEMSCHEDULER_REDIS_PORT=
MEMSCHEDULER_REDIS_DB=
MEMSCHEDULER_REDIS_PASSWORD=
MEMSCHEDULER_REDIS_TIMEOUT=
MEMSCHEDULER_REDIS_CONNECT_TIMEOUT=


## Nacos (optional config center)
# Enable/disable Nacos long-polling config watch (defaults to true)
NACOS_ENABLE_WATCH=false
# Long-polling watch interval in seconds (default 30; may be left unconfigured)
NACOS_WATCH_INTERVAL=60
# nacos server address
NACOS_SERVER_ADDR=
# nacos dataid
NACOS_DATA_ID=
# nacos group
NACOS_GROUP=DEFAULT_GROUP
# nacos namespace
NACOS_NAMESPACE=
# nacos ak
AK=
# nacos sk
SK=
# Whether to use Redis schedule
DEFAULT_USE_REDIS_QUEUE=false

# chat model for chat api
CHAT_MODEL_LIST='[{
"backend": "deepseek",
"api_base": "http://localhost:1234",
"api_key": "your-api-key",
"model_name_or_path": "deepseek-r1",
"support_models": ["deepseek-r1"]
}]'
# Enable Chat API
ENABLE_CHAT_API=true

# RabbitMQ host name for message-log pipeline
MEMSCHEDULER_RABBITMQ_HOST_NAME=
# RabbitMQ user name for message-log pipeline
MEMSCHEDULER_RABBITMQ_USER_NAME=
# RabbitMQ password for message-log pipeline
MEMSCHEDULER_RABBITMQ_PASSWORD=
# RabbitMQ virtual host for message-log pipeline
MEMSCHEDULER_RABBITMQ_VIRTUAL_HOST=memos
# Erase connection state on connect for message-log pipeline
MEMSCHEDULER_RABBITMQ_ERASE_ON_CONNECT=true
# RabbitMQ port for message-log pipeline
MEMSCHEDULER_RABBITMQ_PORT=5672
CHAT_MODEL_LIST=[{"backend": "qwen", "api_base": "https://dashscope.aliyuncs.com/compatible-mode/v1", "api_key": "you_bailian_api_key", "model_name_or_path": "qwen3-max-preview", "extra_body": {"enable_thinking": true} ,"support_models": ["qwen3-max-preview"]}]
Loading
Loading