|
4 | 4 | from typing import Any, cast |
5 | 5 |
|
6 | 6 | import pytest |
| 7 | +from openai import AsyncOpenAI |
7 | 8 | from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage |
| 9 | +from openai.types.chat.chat_completion import Choice as CompletionChoice |
8 | 10 | from openai.types.chat.chat_completion_chunk import Choice, ChoiceDelta |
9 | 11 | from openai.types.completion_usage import ( |
10 | 12 | CompletionTokensDetails, |
|
19 | 21 | ) |
20 | 22 |
|
21 | 23 | from agents.model_settings import ModelSettings |
| 24 | +from agents.models.chatcmpl_converter import Converter |
22 | 25 | from agents.models.interface import ModelTracing |
23 | 26 | from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel |
24 | 27 | from agents.models.openai_provider import OpenAIProvider |
@@ -340,3 +343,121 @@ async def patched_fetch_response(self, *args, **kwargs): |
340 | 343 | assert isinstance(response_event.response.output[0], ResponseOutputMessage) |
341 | 344 | assert isinstance(response_event.response.output[0].content[0], ResponseOutputText) |
342 | 345 | assert response_event.response.output[0].content[0].text == "The answer is 42" |
| 346 | + |
| 347 | + |
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_openai_chatcompletions_includes_reasoning_for_deepseek(monkeypatch) -> None:
    """
    Ensure reasoning_content is forwarded when calling DeepSeek via OpenAI-compatible client.
    """
    # Capture the keyword the model layer passes into the converter.
    recorded: dict[str, Any] = {}

    def fake_items_to_messages(
        items: Any, preserve_thinking_blocks: bool = False, include_reasoning_content: bool = False
    ):
        recorded["include_reasoning_content"] = include_reasoning_content
        return []

    monkeypatch.setattr(Converter, "items_to_messages", staticmethod(fake_items_to_messages))

    class StubCompletions:
        async def create(self, **kwargs):
            # Minimal well-formed completion; its content is irrelevant to the assertion.
            return ChatCompletion(
                id="resp-id",
                created=0,
                model="deepseek-reasoner",
                object="chat.completion",
                choices=[
                    CompletionChoice(
                        index=0,
                        finish_reason="stop",
                        message=ChatCompletionMessage(role="assistant", content="Hi"),
                    )
                ],
                usage=CompletionUsage(completion_tokens=1, prompt_tokens=1, total_tokens=2),
            )

    class StubChat:
        def __init__(self):
            self.completions = StubCompletions()

    class StubClient:
        def __init__(self):
            self.chat = StubChat()
            # A DeepSeek base URL is what should enable reasoning_content forwarding.
            self.base_url = "https://api.deepseek.com"

    model = OpenAIChatCompletionsModel("deepseek-reasoner", cast(AsyncOpenAI, StubClient()))

    await model.get_response(
        system_instructions=None,
        input="",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
        conversation_id=None,
        prompt=None,
    )

    assert recorded.get("include_reasoning_content", False) is True
| 405 | + |
| 406 | + |
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_openai_chatcompletions_reasoning_disabled_for_non_deepseek(monkeypatch) -> None:
    """
    Verify reasoning_content is not added for non-DeepSeek calls by default.

    The spy records both that it ran and the keyword value it received, so
    the test cannot pass vacuously if items_to_messages is never invoked
    (the original getattr(..., False) default was indistinguishable from
    "spy never called").
    """

    def spy_items_to_messages(
        items: Any, preserve_thinking_blocks: bool = False, include_reasoning_content: bool = False
    ):
        spy_items_to_messages.called = True  # type: ignore[attr-defined]
        spy_items_to_messages.include_reasoning_content = include_reasoning_content  # type: ignore[attr-defined] # noqa: E501
        return []

    monkeypatch.setattr(Converter, "items_to_messages", staticmethod(spy_items_to_messages))

    class DummyCompletions:
        async def create(self, **kwargs):
            # Minimal well-formed completion; its content is irrelevant to the assertion.
            return ChatCompletion(
                id="resp-id",
                created=0,
                model="gpt-4o",
                object="chat.completion",
                choices=[
                    CompletionChoice(
                        index=0,
                        finish_reason="stop",
                        message=ChatCompletionMessage(role="assistant", content="Hi"),
                    )
                ],
                usage=CompletionUsage(completion_tokens=1, prompt_tokens=1, total_tokens=2),
            )

    class DummyChat:
        def __init__(self):
            self.completions = DummyCompletions()

    class DummyClient:
        def __init__(self):
            self.chat = DummyChat()
            # Non-DeepSeek base URL: reasoning_content must stay disabled.
            self.base_url = "https://api.openai.com"

    model = OpenAIChatCompletionsModel("gpt-4o", cast(AsyncOpenAI, DummyClient()))

    await model.get_response(
        system_instructions=None,
        input="",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
        conversation_id=None,
        prompt=None,
    )

    # Guard against a vacuous pass: the spy must actually have run, and the
    # getattr default is True so a missing attribute fails the second assert.
    assert getattr(spy_items_to_messages, "called", False) is True
    assert getattr(spy_items_to_messages, "include_reasoning_content", True) is False
0 commit comments