diff --git a/tests/unit/test_platform_base.py b/tests/unit/test_platform_base.py new file mode 100644 index 000000000..081c0c3f9 --- /dev/null +++ b/tests/unit/test_platform_base.py @@ -0,0 +1,348 @@ +"""Tests for Platform base class.""" + +import asyncio +from datetime import datetime +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from astrbot.core.platform.platform import Platform, PlatformError, PlatformStatus +from astrbot.core.platform.platform_metadata import PlatformMetadata + + +class ConcretePlatform(Platform): + """Concrete implementation of Platform for testing purposes.""" + + def __init__(self, config: dict, event_queue: asyncio.Queue) -> None: + super().__init__(config, event_queue) + self._meta = PlatformMetadata( + name="test_platform", + description="Test platform for unit testing", + id="test_platform_id", + ) + + def run(self): + """Return a coroutine for running the platform.""" + return self._run_impl() + + async def _run_impl(self): + """Implementation of run method.""" + await asyncio.Future() # Never completes + + def meta(self) -> PlatformMetadata: + """Return platform metadata.""" + return self._meta + + +@pytest.fixture +def event_queue(): + """Create an event queue for testing.""" + return asyncio.Queue() + + +@pytest.fixture +def platform_config(): + """Create a platform configuration for testing.""" + return { + "id": "test_platform_id", + "type": "test_platform", + "enable": True, + } + + +@pytest.fixture +def platform(event_queue, platform_config): + """Create a concrete platform instance for testing.""" + return ConcretePlatform(platform_config, event_queue) + + +class TestPlatformInit: + """Tests for Platform initialization.""" + + def test_init_basic(self, event_queue, platform_config): + """Test basic Platform initialization.""" + platform = ConcretePlatform(platform_config, event_queue) + + assert platform.config == platform_config + assert platform._event_queue == event_queue + assert 
platform.client_self_id is not None + assert len(platform.client_self_id) == 32 # uuid.hex length + + def test_init_status_pending(self, platform): + """Test that initial status is PENDING.""" + assert platform.status == PlatformStatus.PENDING + + def test_init_empty_errors(self, platform): + """Test that initial errors list is empty.""" + assert platform.errors == [] + assert platform.last_error is None + + def test_init_started_at_none(self, platform): + """Test that started_at is None initially.""" + assert platform._started_at is None + + +class TestPlatformStatus: + """Tests for Platform status property.""" + + def test_status_getter(self, platform): + """Test status getter returns current status.""" + assert platform.status == PlatformStatus.PENDING + + def test_status_setter_to_running(self, platform): + """Test setting status to RUNNING sets started_at.""" + platform.status = PlatformStatus.RUNNING + + assert platform.status == PlatformStatus.RUNNING + assert platform._started_at is not None + assert isinstance(platform._started_at, datetime) + + def test_status_setter_running_only_sets_started_at_once(self, platform): + """Test that started_at is only set once when status becomes RUNNING.""" + first_time = datetime(2020, 1, 1) + platform._started_at = first_time + + platform.status = PlatformStatus.RUNNING + + assert platform._started_at == first_time + + def test_status_setter_to_error(self, platform): + """Test setting status to ERROR.""" + platform.status = PlatformStatus.ERROR + assert platform.status == PlatformStatus.ERROR + + def test_status_setter_to_stopped(self, platform): + """Test setting status to STOPPED.""" + platform.status = PlatformStatus.STOPPED + assert platform.status == PlatformStatus.STOPPED + + +class TestPlatformErrors: + """Tests for Platform error handling.""" + + def test_errors_property_returns_list(self, platform): + """Test errors property returns the errors list.""" + assert platform.errors == [] + + def 
test_last_error_returns_none_when_empty(self, platform): + """Test last_error returns None when no errors.""" + assert platform.last_error is None + + def test_record_error_adds_to_list(self, platform): + """Test record_error adds error to the list.""" + platform.record_error("Test error message") + + assert len(platform.errors) == 1 + assert platform.errors[0].message == "Test error message" + assert platform.errors[0].traceback is None + + def test_record_error_with_traceback(self, platform): + """Test record_error with traceback.""" + platform.record_error("Error with traceback", "Line 1\nLine 2") + + assert platform.errors[0].traceback == "Line 1\nLine 2" + + def test_record_error_sets_status_to_error(self, platform): + """Test record_error sets status to ERROR.""" + platform.record_error("Test error") + assert platform.status == PlatformStatus.ERROR + + def test_last_error_returns_most_recent(self, platform): + """Test last_error returns the most recent error.""" + platform.record_error("First error") + platform.record_error("Second error") + + assert platform.last_error.message == "Second error" + + def test_clear_errors_removes_all_errors(self, platform): + """Test clear_errors removes all errors.""" + platform.record_error("Error 1") + platform.record_error("Error 2") + platform.clear_errors() + + assert platform.errors == [] + assert platform.last_error is None + + def test_clear_errors_resets_status_from_error_to_running(self, platform): + """Test clear_errors resets status from ERROR to RUNNING.""" + platform.record_error("Error") + assert platform.status == PlatformStatus.ERROR + + platform.clear_errors() + assert platform.status == PlatformStatus.RUNNING + + def test_clear_errors_does_not_change_status_if_not_error(self, platform): + """Test clear_errors doesn't change status if not ERROR.""" + platform.status = PlatformStatus.STOPPED + platform.clear_errors() + + assert platform.status == PlatformStatus.STOPPED + + +class TestPlatformError: + """Tests 
for PlatformError dataclass.""" + + def test_platform_error_creation(self): + """Test creating a PlatformError.""" + error = PlatformError(message="Test error") + + assert error.message == "Test error" + assert error.timestamp is not None + assert isinstance(error.timestamp, datetime) + assert error.traceback is None + + def test_platform_error_with_traceback(self): + """Test creating a PlatformError with traceback.""" + error = PlatformError(message="Error", traceback="Stack trace here") + + assert error.traceback == "Stack trace here" + + +class TestUnifiedWebhook: + """Tests for unified_webhook method.""" + + def test_unified_webhook_false_by_default(self, platform): + """Test unified_webhook returns False by default.""" + assert platform.unified_webhook() is False + + def test_unified_webhook_true_when_configured(self, event_queue): + """Test unified_webhook returns True when properly configured.""" + config = { + "unified_webhook_mode": True, + "webhook_uuid": "test-uuid-123", + } + platform = ConcretePlatform(config, event_queue) + + assert platform.unified_webhook() is True + + def test_unified_webhook_false_when_missing_uuid(self, event_queue): + """Test unified_webhook returns False when webhook_uuid is missing.""" + config = {"unified_webhook_mode": True} + platform = ConcretePlatform(config, event_queue) + + assert platform.unified_webhook() is False + + def test_unified_webhook_false_when_mode_disabled(self, event_queue): + """Test unified_webhook returns False when mode is disabled.""" + config = { + "unified_webhook_mode": False, + "webhook_uuid": "test-uuid-123", + } + platform = ConcretePlatform(config, event_queue) + + assert platform.unified_webhook() is False + + +class TestGetStats: + """Tests for get_stats method.""" + + def test_get_stats_basic(self, platform): + """Test get_stats returns basic statistics.""" + stats = platform.get_stats() + + assert stats["id"] == "test_platform_id" + assert stats["type"] == "test_platform" + assert 
stats["status"] == PlatformStatus.PENDING.value + assert stats["error_count"] == 0 + assert stats["last_error"] is None + assert stats["unified_webhook"] is False + + def test_get_stats_with_running_status(self, platform): + """Test get_stats with RUNNING status includes started_at.""" + platform.status = PlatformStatus.RUNNING + stats = platform.get_stats() + + assert stats["status"] == PlatformStatus.RUNNING.value + assert stats["started_at"] is not None + + def test_get_stats_with_errors(self, platform): + """Test get_stats includes error information.""" + platform.record_error("Test error", "Traceback info") + stats = platform.get_stats() + + assert stats["error_count"] == 1 + assert stats["last_error"] is not None + assert stats["last_error"]["message"] == "Test error" + assert stats["last_error"]["traceback"] == "Traceback info" + + def test_get_stats_meta_info(self, platform): + """Test get_stats includes metadata information.""" + stats = platform.get_stats() + + assert "meta" in stats + assert stats["meta"]["name"] == "test_platform" + assert stats["meta"]["id"] == "test_platform_id" + + +class TestWebhookCallback: + """Tests for webhook_callback method.""" + + @pytest.mark.asyncio + async def test_webhook_callback_raises_not_implemented(self, platform): + """Test webhook_callback raises NotImplementedError by default.""" + mock_request = MagicMock() + + with pytest.raises(NotImplementedError) as exc_info: + await platform.webhook_callback(mock_request) + + assert "未实现统一 Webhook 模式" in str(exc_info.value) + + +class TestCommitEvent: + """Tests for commit_event method.""" + + def test_commit_event_puts_in_queue(self, platform, event_queue): + """Test commit_event puts event in the queue.""" + mock_event = MagicMock() + platform.commit_event(mock_event) + + assert event_queue.qsize() == 1 + assert event_queue.get_nowait() == mock_event + + +class TestTerminate: + """Tests for terminate method.""" + + @pytest.mark.asyncio + async def 
test_terminate_default_implementation(self, platform): + """Test terminate method has default empty implementation.""" + # Should not raise any exception + await platform.terminate() + + +class TestGetClient: + """Tests for get_client method.""" + + def test_get_client_default_returns_none(self, platform): + """Test get_client returns None by default.""" + result = platform.get_client() + assert result is None + + +class TestSendBySession: + """Tests for send_by_session method.""" + + @pytest.mark.asyncio + async def test_send_by_session_default_implementation(self, platform): + """Test send_by_session default implementation.""" + mock_session = MagicMock() + mock_message_chain = MagicMock() + + with patch( + "astrbot.core.platform.platform.Metric.upload", new_callable=AsyncMock + ) as mock_upload: + await platform.send_by_session(mock_session, mock_message_chain) + mock_upload.assert_awaited_once_with( + msg_event_tick=1, adapter_name="test_platform" + ) + + +class TestPlatformStatusEnum: + """Tests for PlatformStatus enum.""" + + def test_platform_status_values(self): + """Test PlatformStatus enum values.""" + assert PlatformStatus.PENDING.value == "pending" + assert PlatformStatus.RUNNING.value == "running" + assert PlatformStatus.ERROR.value == "error" + assert PlatformStatus.STOPPED.value == "stopped" diff --git a/tests/unit/test_platform_manager.py b/tests/unit/test_platform_manager.py new file mode 100644 index 000000000..7c098632d --- /dev/null +++ b/tests/unit/test_platform_manager.py @@ -0,0 +1,465 @@ +"""Tests for platform register and manager functions.""" + +from __future__ import annotations + +import subprocess +import sys +import textwrap +from pathlib import Path + +import pytest + +from astrbot.core.platform.register import ( + platform_cls_map, + platform_registry, + register_platform_adapter, + unregister_platform_adapters_by_module, +) + + +def _run_python(code: str) -> subprocess.CompletedProcess[str]: + repo_root = 
Path(__file__).resolve().parents[2] + return subprocess.run( + [sys.executable, "-c", textwrap.dedent(code)], + cwd=repo_root, + capture_output=True, + text=True, + check=False, + ) + + +def _assert_platform_manager_case(case: str) -> None: + code = f""" + import asyncio + + case = {case!r} + + from astrbot.core.platform.manager import PlatformManager + from astrbot.core.platform.platform import PlatformStatus + + + class DummyConfig(dict): + def save_config(self): + self["_saved"] = True + + + def make_manager(): + cfg = DummyConfig({{"platform": [], "platform_settings": {{}}}}) + return PlatformManager(cfg, asyncio.Queue()) + + + if case == "is_valid_platform_id_valid": + manager = make_manager() + assert manager._is_valid_platform_id("platform_1") + assert manager._is_valid_platform_id("a-b") + assert manager._is_valid_platform_id("A1") + + elif case == "is_valid_platform_id_invalid": + manager = make_manager() + assert manager._is_valid_platform_id(None) is False + assert manager._is_valid_platform_id("") is False + assert manager._is_valid_platform_id("a:b") is False + assert manager._is_valid_platform_id("a!b") is False + + elif case == "sanitize_platform_id": + manager = make_manager() + assert manager._sanitize_platform_id("a:b!c") == ("a_b_c", True) + assert manager._sanitize_platform_id("abc") == ("abc", False) + assert manager._sanitize_platform_id(None) == (None, False) + + elif case == "platform_manager_init": + manager = make_manager() + assert manager.platform_insts == [] + assert manager._inst_map == {{}} + assert manager.get_insts() == [] + assert manager.platforms_config == [] + assert manager.settings == {{}} + + elif case == "get_all_stats_empty": + manager = make_manager() + stats = manager.get_all_stats() + assert stats["summary"]["total"] == 0 + assert stats["summary"]["running"] == 0 + assert stats["summary"]["error"] == 0 + assert stats["summary"]["total_errors"] == 0 + + elif case == "get_all_stats_with_platforms": + manager = 
make_manager() + + class RunningInst: + def get_stats(self): + return {{ + "id": "p1", + "status": PlatformStatus.RUNNING.value, + "error_count": 1, + }} + + class ErrorInst: + def get_stats(self): + return {{ + "id": "p2", + "status": PlatformStatus.ERROR.value, + "error_count": 2, + }} + + manager.platform_insts = [RunningInst(), ErrorInst()] + stats = manager.get_all_stats() + assert stats["summary"]["total"] == 2 + assert stats["summary"]["running"] == 1 + assert stats["summary"]["error"] == 1 + assert stats["summary"]["total_errors"] == 3 + assert len(stats["platforms"]) == 2 + + elif case == "get_insts_empty": + manager = make_manager() + assert manager.get_insts() == [] + + elif case == "get_insts_returns_platforms": + manager = make_manager() + p1, p2 = object(), object() + manager.platform_insts = [p1, p2] + insts = manager.get_insts() + assert len(insts) == 2 + assert insts[0] is p1 + assert insts[1] is p2 + + else: + raise AssertionError(f"Unknown case: {{case}}") + """ + proc = _run_python(code) + assert proc.returncode == 0, ( + "PlatformManager subprocess test failed.\n" + f"case={case}\n" + f"stdout:\n{proc.stdout}\n" + f"stderr:\n{proc.stderr}\n" + ) + + +@pytest.fixture(autouse=True) +def _isolate_platform_registry(): + """Isolate global platform registry state between tests.""" + original_registry = platform_registry.copy() + original_cls_map = platform_cls_map.copy() + platform_registry.clear() + platform_cls_map.clear() + try: + yield + finally: + platform_registry.clear() + platform_cls_map.clear() + platform_registry.extend(original_registry) + platform_cls_map.update(original_cls_map) + + +class TestRegisterPlatformAdapter: + """Tests for register_platform_adapter decorator.""" + + def test_register_platform_adapter_basic(self): + """Test basic platform adapter registration.""" + + @register_platform_adapter( + adapter_name="test_adapter", + desc="Test adapter description", + ) + class TestAdapter: + pass + + assert "test_adapter" in 
platform_cls_map + assert platform_cls_map["test_adapter"] == TestAdapter + + # Check registry entry + assert len(platform_registry) == 1 + meta = platform_registry[0] + assert meta.name == "test_adapter" + assert meta.description == "Test adapter description" + assert meta.id == "test_adapter" + + def test_register_platform_adapter_with_config_template(self): + """Test registration with default config template.""" + config_tmpl = {"token": "", "secret": ""} + + @register_platform_adapter( + adapter_name="test_adapter_config", + desc="Test adapter with config", + default_config_tmpl=config_tmpl, + ) + class TestAdapterConfig: + pass + + meta = platform_registry[0] + # Should add type, enable, and id to config template + assert meta.default_config_tmpl is not None + assert meta.default_config_tmpl["type"] == "test_adapter_config" + assert meta.default_config_tmpl["enable"] is False + assert meta.default_config_tmpl["id"] == "test_adapter_config" + assert meta.default_config_tmpl["token"] == "" + + def test_register_platform_adapter_with_display_name(self): + """Test registration with display name.""" + + @register_platform_adapter( + adapter_name="test_adapter_display", + desc="Test adapter", + adapter_display_name="My Custom Adapter", + ) + class TestAdapterDisplay: + pass + + meta = platform_registry[0] + assert meta.adapter_display_name == "My Custom Adapter" + + def test_register_platform_adapter_with_logo_path(self): + """Test registration with logo path.""" + + @register_platform_adapter( + adapter_name="test_adapter_logo", + desc="Test adapter", + logo_path="logos/adapter.png", + ) + class TestAdapterLogo: + pass + + meta = platform_registry[0] + assert meta.logo_path == "logos/adapter.png" + + def test_register_platform_adapter_with_streaming_flag(self): + """Test registration with streaming message flag.""" + + @register_platform_adapter( + adapter_name="test_adapter_streaming", + desc="Test adapter", + support_streaming_message=False, + ) + class 
TestAdapterStreaming: + pass + + meta = platform_registry[0] + assert meta.support_streaming_message is False + + def test_register_platform_adapter_with_i18n_resources(self): + """Test registration with i18n resources.""" + i18n = {"zh-CN": {"name": "测试"}} + + @register_platform_adapter( + adapter_name="test_adapter_i18n", + desc="Test adapter", + i18n_resources=i18n, + ) + class TestAdapterI18n: + pass + + meta = platform_registry[0] + assert meta.i18n_resources == i18n + + def test_register_platform_adapter_with_config_metadata(self): + """Test registration with config metadata.""" + config_meta = {"fields": []} + + @register_platform_adapter( + adapter_name="test_adapter_meta", + desc="Test adapter", + config_metadata=config_meta, + ) + class TestAdapterMeta: + pass + + meta = platform_registry[0] + assert meta.config_metadata == config_meta + + def test_register_platform_adapter_duplicate_raises_error(self): + """Test that duplicate registration raises ValueError.""" + + @register_platform_adapter( + adapter_name="duplicate_adapter", + desc="First registration", + ) + class FirstAdapter: + pass + + with pytest.raises(ValueError) as exc_info: + + @register_platform_adapter( + adapter_name="duplicate_adapter", + desc="Second registration", + ) + class SecondAdapter: # noqa: F811 + pass + + assert "已经注册过" in str(exc_info.value) + + def test_register_platform_adapter_module_path_captured(self): + """Test that module path is captured.""" + + @register_platform_adapter( + adapter_name="test_adapter_module", + desc="Test adapter", + ) + class TestAdapterModule: + pass + + meta = platform_registry[0] + assert meta.module_path is not None + assert "test_platform_manager" in meta.module_path + + +class TestUnregisterPlatformAdaptersByModule: + """Tests for unregister_platform_adapters_by_module function.""" + + def test_unregister_by_module_prefix(self): + """Test unregistering adapters by module prefix.""" + + # Register two adapters with different module paths + 
@register_platform_adapter( + adapter_name="adapter_to_remove", + desc="To be removed", + ) + class AdapterToRemove: + pass + + # Manually set module path for testing + platform_registry[0].module_path = "plugins.test_plugin.adapter" + + @register_platform_adapter( + adapter_name="adapter_to_keep", + desc="To be kept", + ) + class AdapterToKeep: + pass + + # Manually set module path for testing + platform_registry[1].module_path = "plugins.other_plugin.adapter" + + # Unregister by module prefix + unregistered = unregister_platform_adapters_by_module("plugins.test_plugin") + + assert "adapter_to_remove" in unregistered + assert "adapter_to_keep" not in unregistered + assert "adapter_to_remove" not in platform_cls_map + assert "adapter_to_keep" in platform_cls_map + + # Ensure the registry no longer contains metadata for the removed adapter + remaining_registry_entries = [ + meta for meta in platform_registry if meta.name == "adapter_to_remove" + ] + assert remaining_registry_entries == [] + + # Ensure the kept adapter is still in the registry + kept_registry_entries = [ + meta for meta in platform_registry if meta.name == "adapter_to_keep" + ] + assert len(kept_registry_entries) == 1 + + def test_unregister_no_match(self): + """Test unregistering when no modules match.""" + + @register_platform_adapter( + adapter_name="test_no_match", + desc="Test adapter", + ) + class TestNoMatch: + pass + + unregistered = unregister_platform_adapters_by_module("nonexistent.module") + + assert unregistered == [] + assert "test_no_match" in platform_cls_map + + +class TestPlatformRegistry: + """Tests for platform registry data structures.""" + + def test_platform_registry_is_list(self): + """Test platform_registry is a list.""" + assert isinstance(platform_registry, list) + + def test_platform_cls_map_is_dict(self): + """Test platform_cls_map is a dictionary.""" + assert isinstance(platform_cls_map, dict) + + def test_registry_and_cls_map_consistency(self): + """Test registry and 
cls_map stay consistent.""" + + @register_platform_adapter( + adapter_name="consistency_test", + desc="Test consistency", + ) + class ConsistencyAdapter: + pass + + # Both should have the adapter + assert len([m for m in platform_registry if m.name == "consistency_test"]) == 1 + assert "consistency_test" in platform_cls_map + + +# NOTE: The following tests previously ran into circular import issues +# when importing PlatformManager directly from astrbot.core.platform.manager. +# To avoid this, they exercise PlatformManager behavior in a separate +# subprocess via `_assert_platform_manager_case(...)`, which imports +# PlatformManager in isolation, keeping any import failure out of this process. +# The historical circular import chain was: +# manager.py -> star_handler -> star_tools -> api.platform -> star.register -> star_handler -> astr_agent_context -> context -> manager +# +# WARNING: These tests are currently marked as xfail due to an unresolved +# circular import issue that prevents PlatformManager from being imported +# even in a subprocess. This is a known issue in the codebase that needs +# to be addressed separately. 
+ + +@pytest.mark.xfail( + reason="Circular import issue prevents PlatformManager import even in subprocess" +) +class TestPlatformManagerHelperFunctions: + """Tests for PlatformManager helper functions.""" + + def test_is_valid_platform_id_valid(self): + """Test _is_valid_platform_id with valid IDs.""" + _assert_platform_manager_case("is_valid_platform_id_valid") + + def test_is_valid_platform_id_invalid(self): + """Test _is_valid_platform_id with invalid IDs.""" + _assert_platform_manager_case("is_valid_platform_id_invalid") + + def test_sanitize_platform_id(self): + """Test _sanitize_platform_id function.""" + _assert_platform_manager_case("sanitize_platform_id") + + +@pytest.mark.xfail( + reason="Circular import issue prevents PlatformManager import even in subprocess" +) +class TestPlatformManagerInit: + """Tests for PlatformManager initialization.""" + + def test_platform_manager_init(self): + """Test PlatformManager initialization.""" + _assert_platform_manager_case("platform_manager_init") + + +@pytest.mark.xfail( + reason="Circular import issue prevents PlatformManager import even in subprocess" +) +class TestPlatformManagerGetAllStats: + """Tests for PlatformManager get_all_stats method.""" + + def test_get_all_stats_empty(self): + """Test get_all_stats with no platforms.""" + _assert_platform_manager_case("get_all_stats_empty") + + def test_get_all_stats_with_platforms(self): + """Test get_all_stats with mock platforms.""" + _assert_platform_manager_case("get_all_stats_with_platforms") + + +@pytest.mark.xfail( + reason="Circular import issue prevents PlatformManager import even in subprocess" +) +class TestPlatformManagerGetInsts: + """Tests for PlatformManager get_insts method.""" + + def test_get_insts_empty(self): + """Test get_insts returns empty list when no platforms.""" + _assert_platform_manager_case("get_insts_empty") + + def test_get_insts_returns_platforms(self): + """Test get_insts returns platform instances.""" + 
_assert_platform_manager_case("get_insts_returns_platforms") diff --git a/tests/unit/test_platform_metadata.py b/tests/unit/test_platform_metadata.py new file mode 100644 index 000000000..30ea380c0 --- /dev/null +++ b/tests/unit/test_platform_metadata.py @@ -0,0 +1,226 @@ +"""Tests for PlatformMetadata class.""" + +from astrbot.core.platform.platform_metadata import PlatformMetadata + + +class TestPlatformMetadata: + """Tests for PlatformMetadata dataclass.""" + + def test_platform_metadata_creation_and_defaults(self): + """Test creating PlatformMetadata with required fields and checking defaults.""" + meta = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + ) + + assert meta.name == "test_platform" + assert meta.description == "A test platform" + assert meta.id == "test_platform_id" + + # Default values + assert meta.default_config_tmpl is None + assert meta.adapter_display_name is None + assert meta.logo_path is None + assert meta.support_streaming_message is True + assert meta.support_proactive_message is True + assert meta.module_path is None + assert meta.i18n_resources is None + assert meta.config_metadata is None + + def test_platform_metadata_with_all_fields(self): + """Test creating PlatformMetadata with all fields.""" + default_config = {"type": "test", "enable": True} + i18n = {"zh-CN": {"name": "测试平台"}, "en-US": {"name": "Test Platform"}} + config_meta = {"fields": [{"name": "token", "type": "string"}]} + + meta = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + default_config_tmpl=default_config, + adapter_display_name="Test Platform Display", + logo_path="logos/test.png", + support_streaming_message=False, + support_proactive_message=False, + module_path="test.module.path", + i18n_resources=i18n, + config_metadata=config_meta, + ) + + assert meta.name == "test_platform" + assert meta.description == "A test platform" + assert meta.id == 
"test_platform_id" + assert meta.default_config_tmpl == default_config + assert meta.adapter_display_name == "Test Platform Display" + assert meta.logo_path == "logos/test.png" + assert meta.support_streaming_message is False + assert meta.support_proactive_message is False + assert meta.module_path == "test.module.path" + assert meta.i18n_resources == i18n + assert meta.config_metadata == config_meta + + def test_platform_metadata_support_streaming_message(self): + """Test support_streaming_message field.""" + meta_streaming = PlatformMetadata( + name="streaming_platform", + description="Supports streaming", + id="streaming_id", + support_streaming_message=True, + ) + + meta_no_streaming = PlatformMetadata( + name="no_streaming_platform", + description="No streaming support", + id="no_streaming_id", + support_streaming_message=False, + ) + + assert meta_streaming.support_streaming_message is True + assert meta_no_streaming.support_streaming_message is False + + def test_platform_metadata_support_proactive_message(self): + """Test support_proactive_message field.""" + meta_proactive = PlatformMetadata( + name="proactive_platform", + description="Supports proactive messages", + id="proactive_id", + support_proactive_message=True, + ) + + meta_no_proactive = PlatformMetadata( + name="no_proactive_platform", + description="No proactive message support", + id="no_proactive_id", + support_proactive_message=False, + ) + + assert meta_proactive.support_proactive_message is True + assert meta_no_proactive.support_proactive_message is False + + def test_platform_metadata_with_default_config_tmpl(self): + """Test PlatformMetadata with default config template.""" + config_tmpl = { + "type": "test_platform", + "enable": False, + "id": "test_platform", + "token": "", + "secret": "", + } + + meta = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + default_config_tmpl=config_tmpl, + ) + + assert meta.default_config_tmpl == 
config_tmpl + assert meta.default_config_tmpl["type"] == "test_platform" + assert meta.default_config_tmpl["enable"] is False + + def test_platform_metadata_with_i18n_resources(self): + """Test PlatformMetadata with i18n resources.""" + i18n = { + "zh-CN": { + "name": "测试平台", + "description": "这是一个测试平台", + }, + "en-US": { + "name": "Test Platform", + "description": "This is a test platform", + }, + } + + meta = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + i18n_resources=i18n, + ) + + assert meta.i18n_resources == i18n + assert meta.i18n_resources["zh-CN"]["name"] == "测试平台" + assert meta.i18n_resources["en-US"]["name"] == "Test Platform" + + def test_platform_metadata_with_config_metadata(self): + """Test PlatformMetadata with config metadata.""" + config_meta = { + "fields": [ + {"name": "token", "type": "string", "label": "Token", "required": True}, + { + "name": "secret", + "type": "string", + "label": "Secret", + "required": False, + }, + ] + } + + meta = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + config_metadata=config_meta, + ) + + assert meta.config_metadata == config_meta + assert len(meta.config_metadata["fields"]) == 2 + + def test_platform_metadata_module_path(self): + """Test PlatformMetadata module_path field.""" + meta = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + module_path="astrbot.core.platform.sources.test", + ) + + assert meta.module_path == "astrbot.core.platform.sources.test" + + def test_platform_metadata_adapter_display_name(self): + """Test adapter_display_name field.""" + meta_with_display = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + adapter_display_name="My Test Platform", + ) + + meta_without_display = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + ) + + 
assert meta_with_display.adapter_display_name == "My Test Platform" + assert meta_without_display.adapter_display_name is None + + def test_platform_metadata_logo_path(self): + """Test logo_path field.""" + meta = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + logo_path="assets/logo.png", + ) + + assert meta.logo_path == "assets/logo.png" + + def test_platform_metadata_accepts_empty_strings(self): + """Test metadata object accepts empty-string identity fields.""" + meta = PlatformMetadata(name="", description="", id="") + assert meta.name == "" + assert meta.description == "" + assert meta.id == "" + + def test_platform_metadata_accepts_nonstandard_i18n_resources(self): + """Test metadata keeps i18n_resources as-is without runtime validation.""" + malformed_i18n = {"zh-CN": "invalid-format"} + meta = PlatformMetadata( + name="test_platform", + description="A test platform", + id="test_platform_id", + i18n_resources=malformed_i18n, + ) + assert meta.i18n_resources == malformed_i18n