Commit 2de7412

Handle DeepSeek reasoning content on OpenAI client path
Parent: 1611a60

File tree: 2 files changed (+140, −1 lines)

src/agents/models/openai_chatcompletions.py

Lines changed: 19 additions & 1 deletion
@@ -218,7 +218,10 @@ async def _fetch_response(
         stream: bool = False,
         prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
-        converted_messages = Converter.items_to_messages(input)
+        include_reasoning_content = self._should_include_reasoning_content(model_settings)
+        converted_messages = Converter.items_to_messages(
+            input, include_reasoning_content=include_reasoning_content
+        )
 
         if system_instructions:
             converted_messages.insert(
@@ -337,6 +340,21 @@ async def _fetch_response(
         )
         return response, ret
 
+    def _should_include_reasoning_content(self, model_settings: ModelSettings) -> bool:
+        """Determine whether to forward reasoning_content on assistant messages."""
+        model_name = str(self.model).lower()
+        base_url = str(getattr(self._client, "base_url", "") or "").lower()
+
+        if "deepseek" in model_name or "deepseek.com" in base_url:
+            return True
+
+        if isinstance(model_settings.extra_body, dict) and "thinking" in model_settings.extra_body:
+            return True
+        if model_settings.extra_args and "thinking" in model_settings.extra_args:
+            return True
+
+        return False
+
     def _get_client(self) -> AsyncOpenAI:
         if self._client is None:
             self._client = AsyncOpenAI()
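
For context (not part of this commit), a minimal caller-side sketch of the configurations the new helper keys off; the base URL, API key, and the exact `thinking` payload below are illustrative placeholders:

from openai import AsyncOpenAI

from agents.model_settings import ModelSettings
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel

# Branch 1: "deepseek" in the model name or "deepseek.com" in the client base_url
# makes _should_include_reasoning_content return True.
deepseek_client = AsyncOpenAI(base_url="https://api.deepseek.com", api_key="YOUR_API_KEY")
deepseek_model = OpenAIChatCompletionsModel("deepseek-reasoner", deepseek_client)

# Branch 2: a "thinking" key in extra_body (or extra_args) also opts in, regardless
# of the model name or endpoint; the payload shape here is only an example.
settings_with_thinking = ModelSettings(extra_body={"thinking": {"type": "enabled"}})

# Anything else (e.g. "gpt-4o" against api.openai.com with default ModelSettings)
# keeps the previous behavior and does not forward reasoning_content.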

tests/test_reasoning_content.py

Lines changed: 121 additions & 0 deletions
@@ -4,7 +4,9 @@
 from typing import Any, cast
 
 import pytest
+from openai import AsyncOpenAI
 from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
+from openai.types.chat.chat_completion import Choice as CompletionChoice
 from openai.types.chat.chat_completion_chunk import Choice, ChoiceDelta
 from openai.types.completion_usage import (
     CompletionTokensDetails,
@@ -19,6 +21,7 @@
 )
 
 from agents.model_settings import ModelSettings
+from agents.models.chatcmpl_converter import Converter
 from agents.models.interface import ModelTracing
 from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
 from agents.models.openai_provider import OpenAIProvider
@@ -340,3 +343,121 @@ async def patched_fetch_response(self, *args, **kwargs):
     assert isinstance(response_event.response.output[0], ResponseOutputMessage)
     assert isinstance(response_event.response.output[0].content[0], ResponseOutputText)
     assert response_event.response.output[0].content[0].text == "The answer is 42"
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_openai_chatcompletions_includes_reasoning_for_deepseek(monkeypatch) -> None:
+    """
+    Ensure reasoning_content is forwarded when calling DeepSeek via OpenAI-compatible client.
+    """
+
+    def spy_items_to_messages(
+        items: Any, preserve_thinking_blocks: bool = False, include_reasoning_content: bool = False
+    ):
+        spy_items_to_messages.include_reasoning_content = include_reasoning_content  # type: ignore[attr-defined]  # noqa: E501
+        return []
+
+    monkeypatch.setattr(Converter, "items_to_messages", staticmethod(spy_items_to_messages))
+
+    class DummyCompletions:
+        async def create(self, **kwargs):
+            return ChatCompletion(
+                id="resp-id",
+                created=0,
+                model="deepseek-reasoner",
+                object="chat.completion",
+                choices=[
+                    CompletionChoice(
+                        index=0,
+                        finish_reason="stop",
+                        message=ChatCompletionMessage(role="assistant", content="Hi"),
+                    )
+                ],
+                usage=CompletionUsage(completion_tokens=1, prompt_tokens=1, total_tokens=2),
+            )
+
+    class DummyChat:
+        def __init__(self):
+            self.completions = DummyCompletions()
+
+    class DummyClient:
+        def __init__(self):
+            self.chat = DummyChat()
+            self.base_url = "https://api.deepseek.com"
+
+    model = OpenAIChatCompletionsModel("deepseek-reasoner", cast(AsyncOpenAI, DummyClient()))
+
+    await model.get_response(
+        system_instructions=None,
+        input="",
+        model_settings=ModelSettings(),
+        tools=[],
+        output_schema=None,
+        handoffs=[],
+        tracing=ModelTracing.DISABLED,
+        previous_response_id=None,
+        conversation_id=None,
+        prompt=None,
+    )
+
+    assert getattr(spy_items_to_messages, "include_reasoning_content", False) is True
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_openai_chatcompletions_reasoning_disabled_for_non_deepseek(monkeypatch) -> None:
+    """
+    Verify reasoning_content is not added for non-DeepSeek calls by default.
+    """
+
+    def spy_items_to_messages(
+        items: Any, preserve_thinking_blocks: bool = False, include_reasoning_content: bool = False
+    ):
+        spy_items_to_messages.include_reasoning_content = include_reasoning_content  # type: ignore[attr-defined]  # noqa: E501
+        return []
+
+    monkeypatch.setattr(Converter, "items_to_messages", staticmethod(spy_items_to_messages))
+
+    class DummyCompletions:
+        async def create(self, **kwargs):
+            return ChatCompletion(
+                id="resp-id",
+                created=0,
+                model="gpt-4o",
+                object="chat.completion",
+                choices=[
+                    CompletionChoice(
+                        index=0,
+                        finish_reason="stop",
+                        message=ChatCompletionMessage(role="assistant", content="Hi"),
+                    )
+                ],
+                usage=CompletionUsage(completion_tokens=1, prompt_tokens=1, total_tokens=2),
+            )
+
+    class DummyChat:
+        def __init__(self):
+            self.completions = DummyCompletions()
+
+    class DummyClient:
+        def __init__(self):
+            self.chat = DummyChat()
+            self.base_url = "https://api.openai.com"
+
+    model = OpenAIChatCompletionsModel("gpt-4o", cast(AsyncOpenAI, DummyClient()))
+
+    await model.get_response(
+        system_instructions=None,
+        input="",
+        model_settings=ModelSettings(),
+        tools=[],
+        output_schema=None,
+        handoffs=[],
+        tracing=ModelTracing.DISABLED,
+        previous_response_id=None,
+        conversation_id=None,
+        prompt=None,
+    )
+
+    assert getattr(spy_items_to_messages, "include_reasoning_content", False) is False
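
Assuming the repository's usual pytest configuration (the asyncio plugin and the allow_call_model_methods marker), both new tests run offline, since the OpenAI client is duck-typed and Converter.items_to_messages is stubbed out; for example:

    pytest tests/test_reasoning_content.py -k deepseek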
