1 change: 0 additions & 1 deletion contributing/samples/gepa/experiment.py
@@ -43,7 +43,6 @@
 from tau_bench.types import EnvRunResult
 from tau_bench.types import RunConfig
 import tau_bench_agent as tau_bench_agent_lib
-
 import utils
1 change: 0 additions & 1 deletion contributing/samples/gepa/run_experiment.py
@@ -25,7 +25,6 @@
 from absl import flags
 import experiment
 from google.genai import types
-
 import utils
 
 _OUTPUT_DIR = flags.DEFINE_string(
36 changes: 17 additions & 19 deletions src/google/adk/models/lite_llm.py
@@ -1387,13 +1387,7 @@ def _model_response_to_generate_content_response(
   if finish_reason:
     # If LiteLLM already provides a FinishReason enum (e.g., for Gemini), use
     # it directly. Otherwise, map the finish_reason string to the enum.
-    if isinstance(finish_reason, types.FinishReason):
-      llm_response.finish_reason = finish_reason
-    else:
-      finish_reason_str = str(finish_reason).lower()
-      llm_response.finish_reason = _FINISH_REASON_MAPPING.get(
-          finish_reason_str, types.FinishReason.OTHER
-      )
+    llm_response.finish_reason = _map_finish_reason(finish_reason)
   if response.get("usage", None):
     llm_response.usage_metadata = types.GenerateContentResponseUsageMetadata(
         prompt_token_count=response["usage"].get("prompt_tokens", 0),
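For reference, the single replacement line above delegates the enum-versus-string handling described in the comment to a `_map_finish_reason` helper defined elsewhere in `lite_llm.py`. The sketch below is only an approximation of that helper, reconstructed from the inline logic being deleted; the contents of `_FINISH_REASON_MAPPING` shown here are assumptions, and the actual helper in the ADK source may differ.

```python
# Approximate sketch, reconstructed from the deleted inline logic above.
# The real _map_finish_reason and _FINISH_REASON_MAPPING in lite_llm.py
# may differ in their exact entries.
from google.genai import types

_FINISH_REASON_MAPPING = {  # assumed contents, for illustration only
    "stop": types.FinishReason.STOP,
    "length": types.FinishReason.MAX_TOKENS,
    "content_filter": types.FinishReason.SAFETY,
}


def _map_finish_reason(finish_reason) -> types.FinishReason:
  """Normalizes a LiteLLM finish_reason into a types.FinishReason enum."""
  # If LiteLLM already provides the enum (e.g., for Gemini), use it directly.
  if isinstance(finish_reason, types.FinishReason):
    return finish_reason
  # Otherwise map the lowercased string, falling back to OTHER.
  return _FINISH_REASON_MAPPING.get(
      str(finish_reason).lower(), types.FinishReason.OTHER
  )
```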
@@ -1968,18 +1962,22 @@ async def generate_content_async(
                 ),
               )
           )
-          aggregated_llm_response_with_tool_call = (
-              _message_to_generate_content_response(
-                  ChatCompletionAssistantMessage(
-                      role="assistant",
-                      content=text,
-                      tool_calls=tool_calls,
-                  ),
-                  model_version=part.model,
-                  thought_parts=list(reasoning_parts)
-                  if reasoning_parts
-                  else None,
-              )
+          aggregated_llm_response_with_tool_call = _message_to_generate_content_response(
+              ChatCompletionAssistantMessage(
+                  role="assistant",
+                  # FIX: Set content=None for tool-only messages to avoid duplication
+                  # and follow OpenAI/LiteLLM conventions. Planning/reasoning text is
+                  # already streamed (lines 1288-1296) and preserved in thought_parts
+                  # (line 1357). Including it again in content causes duplication and
+                  # violates API specifications for tool-call messages.
+                  # See: https://github.com/google/adk-python/issues/3697
+                  content=None,
+                  tool_calls=tool_calls,
+              ),
+              model_version=part.model,
+              thought_parts=list(reasoning_parts)
+              if reasoning_parts
+              else None,
           )
           aggregated_llm_response_with_tool_call.finish_reason = (
               _map_finish_reason(finish_reason)
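The FIX comment above relies on the OpenAI/LiteLLM convention that an assistant message whose only purpose is to request tool calls carries `tool_calls` with `content` left as None; any planning or reasoning text travels separately, which in ADK's case means as thought parts on the LlmResponse. Below is a minimal sketch of the message shape this change produces; the tool-call payload is purely illustrative, and in `lite_llm.py` the `tool_calls` entries are assembled from the streamed chunks rather than written by hand. The import path used here is litellm's OpenAI types module; the exact import in `lite_llm.py` may differ.

```python
# Illustrative sketch of a tool-only assistant message after this change:
# tool_calls populated, content left as None, planning text kept out of the
# message itself. The tool name and arguments below are hypothetical.
from litellm.types.llms.openai import ChatCompletionAssistantMessage

tool_calls = [
    {
        "id": "call_1",  # illustrative id
        "type": "function",
        "function": {
            "name": "get_weather",  # hypothetical tool
            "arguments": '{"city": "Paris"}',
        },
    }
]

message = ChatCompletionAssistantMessage(
    role="assistant",
    content=None,  # tool-only turn: no assistant text duplicated here
    tool_calls=tool_calls,
)
```

A consumer that needs the planning text reads it from the response's thought parts, which is exactly what the deleted `content=text` used to duplicate.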