23 changes: 20 additions & 3 deletions src/google/adk/flows/llm_flows/base_llm_flow.py
@@ -81,6 +81,7 @@ def _finalize_model_response_event(
llm_request: LlmRequest,
llm_response: LlmResponse,
model_response_event: Event,
function_call_id_cache: Optional[dict[str, str]] = None,
) -> Event:
"""Finalize and build the model response event from LLM response.

Expand All @@ -91,6 +92,9 @@ def _finalize_model_response_event(
llm_request: The original LLM request.
llm_response: The LLM response from the model.
model_response_event: The base event to populate.
function_call_id_cache: Optional dict mapping function-call cache keys
(name plus index within the response) to previously generated IDs. Used
to keep IDs stable across partial and final streaming events.

Returns:
The finalized Event with LLM response data merged in.
@@ -103,7 +107,9 @@ def _finalize_model_response_event(
if finalized_event.content:
function_calls = finalized_event.get_function_calls()
if function_calls:
functions.populate_client_function_call_id(finalized_event)
functions.populate_client_function_call_id(
finalized_event, function_call_id_cache
)
finalized_event.long_running_tool_ids = (
functions.get_long_running_function_calls(
function_calls, llm_request.tools_dict
@@ -827,6 +833,9 @@ async def _run_one_step_async(
author=invocation_context.agent.name,
branch=invocation_context.branch,
)
# Cache maps (function call name, index) keys to generated IDs so that
# partial and final streaming events for the same call share a stable ID.
function_call_id_cache: dict[str, str] = {}
Comment on lines +836 to +838 (Contributor review comment, severity: high)

This change correctly introduces a cache to stabilize function call IDs for the SSE streaming mode handled by _run_one_step_async. However, the live/bidi-streaming mode handled by run_live appears to be missing this fix. The run_live method does not create or pass a function_call_id_cache, leading to unstable function call IDs in that streaming scenario. The caching mechanism should also be implemented for the run_live flow to ensure consistent behavior across all streaming modes.
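For illustration, the invariant run_live would need is the same one the SSE path enforces here: create one ID cache per streaming turn and pass it to populate_client_function_call_id for every partial and final event of that turn. Below is a minimal, self-contained sketch of that invariant, reusing only helpers already exercised by the new tests in this PR; it shows the mechanism, not the actual run_live plumbing, which is not part of this diff.

from google.adk.events.event import Event
from google.adk.flows.llm_flows.functions import populate_client_function_call_id
from google.genai import types


def _fc_event(name: str) -> Event:
  # Build a model event containing a single function call, as the tests do.
  event = Event(author='agent')
  event.content = types.Content(
      role='model',
      parts=[types.Part(function_call=types.FunctionCall(name=name, args={}))],
  )
  return event


# One cache per streaming turn: this is what run_live would need to create
# and thread through its postprocessing, mirroring _run_one_step_async.
cache: dict[str, str] = {}
partial_event = _fc_event('get_weather')  # partial chunk of the live turn
final_event = _fc_event('get_weather')    # final event for the same call
populate_client_function_call_id(partial_event, cache)
populate_client_function_call_id(final_event, cache)
assert (partial_event.get_function_calls()[0].id
        == final_event.get_function_calls()[0].id)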

async with Aclosing(
self._call_llm_async(
invocation_context, llm_request, model_response_event
@@ -840,6 +849,7 @@ async def _run_one_step_async(
llm_request,
llm_response,
model_response_event,
function_call_id_cache,
)
) as agen:
async for event in agen:
@@ -886,6 +896,7 @@ async def _postprocess_async(
llm_request: LlmRequest,
llm_response: LlmResponse,
model_response_event: Event,
function_call_id_cache: Optional[dict[str, str]] = None,
) -> AsyncGenerator[Event, None]:
"""Postprocess after calling the LLM.

@@ -894,6 +905,9 @@ async def _postprocess_async(
llm_request: The original LLM request.
llm_response: The LLM response from the LLM call.
model_response_event: A mutable event for the LLM response.
function_call_id_cache: Optional dict mapping function-call cache keys
(name plus index within the response) to previously generated IDs. Keeps
IDs stable across partial and final streaming events.

Yields:
A generator of events.
@@ -917,7 +931,8 @@ async def _postprocess_async(

# Builds the event.
model_response_event = self._finalize_model_response_event(
llm_request, llm_response, model_response_event
llm_request, llm_response, model_response_event,
function_call_id_cache,
)
yield model_response_event

@@ -1197,9 +1212,11 @@ def _finalize_model_response_event(
llm_request: LlmRequest,
llm_response: LlmResponse,
model_response_event: Event,
function_call_id_cache: Optional[dict[str, str]] = None,
) -> Event:
return _finalize_model_response_event(
llm_request, llm_response, model_response_event
llm_request, llm_response, model_response_event,
function_call_id_cache,
)

async def _resolve_toolset_auth(
19 changes: 16 additions & 3 deletions src/google/adk/flows/llm_flows/functions.py
@@ -181,12 +181,25 @@ def generate_client_function_call_id() -> str:
return f'{AF_FUNCTION_CALL_ID_PREFIX}{uuid.uuid4()}'


def populate_client_function_call_id(model_response_event: Event) -> None:
def populate_client_function_call_id(
model_response_event: Event,
function_call_id_cache: Optional[dict[str, str]] = None,
) -> None:
if not model_response_event.get_function_calls():
return
for function_call in model_response_event.get_function_calls():
for idx, function_call in enumerate(
model_response_event.get_function_calls()
):
if not function_call.id:
function_call.id = generate_client_function_call_id()
# Use (name, index) as cache key so that two calls to the same
# function in a single response keep separate stable IDs.
cache_key = f'{function_call.name}:{idx}'
if function_call_id_cache is not None and cache_key in function_call_id_cache:
function_call.id = function_call_id_cache[cache_key]
else:
function_call.id = generate_client_function_call_id()
if function_call_id_cache is not None:
function_call_id_cache[cache_key] = function_call.id
Comment on lines +197 to +202 (Contributor review comment, severity: medium)

This logic for handling the cache can be simplified. Using dict.setdefault() can make the code more concise and easier to read by combining the check for existence, value retrieval, and setting the default value into a single operation.

Suggested change
-      if function_call_id_cache is not None and cache_key in function_call_id_cache:
-        function_call.id = function_call_id_cache[cache_key]
-      else:
-        function_call.id = generate_client_function_call_id()
-        if function_call_id_cache is not None:
-          function_call_id_cache[cache_key] = function_call.id
+      if function_call_id_cache is not None:
+        function_call.id = function_call_id_cache.setdefault(
+            cache_key, generate_client_function_call_id()
+        )
+      else:
+        function_call.id = generate_client_function_call_id()
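One note on this suggestion: dict.setdefault stores the default only when the key is missing, so an ID cached by a partial event is returned unchanged for the final event. The single behavioral difference from the original if/else is that generate_client_function_call_id() is still evaluated (and its result discarded) on a cache hit, since Python evaluates call arguments eagerly; the cost is one unused UUID. A small illustration of the setdefault semantics being relied on:

cache = {'search:0': 'adk-cached'}
# Existing key: the stored value wins; the supplied default is not written.
assert cache.setdefault('search:0', 'adk-new') == 'adk-cached'
assert cache['search:0'] == 'adk-cached'
# Missing key: the default is stored and returned.
assert cache.setdefault('lookup:0', 'adk-new') == 'adk-new'
assert cache['lookup:0'] == 'adk-new'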



def remove_client_function_call_id(content: Optional[types.Content]) -> None:
196 changes: 196 additions & 0 deletions tests/unittests/flows/llm_flows/test_streaming_function_call_ids.py
@@ -0,0 +1,196 @@
# Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests that function call IDs stay stable across streaming events."""

from google.adk.events.event import Event
from google.adk.flows.llm_flows.base_llm_flow import _finalize_model_response_event
from google.adk.flows.llm_flows.functions import populate_client_function_call_id
from google.adk.models.llm_request import LlmRequest
from google.adk.models.llm_response import LlmResponse
from google.genai import types
import pytest


def _make_fc_response(name: str, args: dict | None = None, partial: bool = False) -> LlmResponse:
"""Create an LlmResponse containing a single function call."""
fc = types.FunctionCall(name=name, args=args or {})
return LlmResponse(
content=types.Content(role='model', parts=[types.Part(function_call=fc)]),
partial=partial,
)


def _make_multi_fc_response(calls: list[tuple[str, dict]], partial: bool = False) -> LlmResponse:
"""Create an LlmResponse containing multiple function calls."""
parts = [
types.Part(function_call=types.FunctionCall(name=name, args=args))
for name, args in calls
]
return LlmResponse(
content=types.Content(role='model', parts=parts),
partial=partial,
)


class TestPopulateClientFunctionCallIdWithCache:
"""Tests for populate_client_function_call_id with ID caching."""

def test_generates_id_and_stores_in_cache(self):
event = Event(author='agent')
event.content = types.Content(
role='model',
parts=[types.Part(function_call=types.FunctionCall(name='get_weather', args={}))],
)
cache: dict[str, str] = {}
populate_client_function_call_id(event, cache)
fc = event.get_function_calls()[0]
assert fc.id.startswith('adk-')
assert 'get_weather:0' in cache
assert cache['get_weather:0'] == fc.id

def test_reuses_cached_id(self):
cache: dict[str, str] = {'get_weather:0': 'adk-cached-id-123'}

event = Event(author='agent')
event.content = types.Content(
role='model',
parts=[types.Part(function_call=types.FunctionCall(name='get_weather', args={}))],
)
populate_client_function_call_id(event, cache)
assert event.get_function_calls()[0].id == 'adk-cached-id-123'

def test_no_cache_generates_new_id_each_time(self):
event1 = Event(author='agent')
event1.content = types.Content(
role='model',
parts=[types.Part(function_call=types.FunctionCall(name='get_weather', args={}))],
)
event2 = Event(author='agent')
event2.content = types.Content(
role='model',
parts=[types.Part(function_call=types.FunctionCall(name='get_weather', args={}))],
)
populate_client_function_call_id(event1)
populate_client_function_call_id(event2)
assert event1.get_function_calls()[0].id != event2.get_function_calls()[0].id

def test_multiple_calls_same_name_get_separate_ids(self):
event = Event(author='agent')
event.content = types.Content(
role='model',
parts=[
types.Part(function_call=types.FunctionCall(name='search', args={'q': 'a'})),
types.Part(function_call=types.FunctionCall(name='search', args={'q': 'b'})),
],
)
cache: dict[str, str] = {}
populate_client_function_call_id(event, cache)
fcs = event.get_function_calls()
assert fcs[0].id != fcs[1].id
assert cache['search:0'] == fcs[0].id
assert cache['search:1'] == fcs[1].id

def test_skips_function_calls_that_already_have_ids(self):
event = Event(author='agent')
event.content = types.Content(
role='model',
parts=[types.Part(function_call=types.FunctionCall(
name='get_weather', args={}, id='server-provided-id'))],
)
cache: dict[str, str] = {}
populate_client_function_call_id(event, cache)
assert event.get_function_calls()[0].id == 'server-provided-id'
assert len(cache) == 0


class TestFinalizeModelResponseEventWithCache:
"""Tests that _finalize_model_response_event preserves IDs via cache."""

def test_partial_and_final_share_same_function_call_id(self):
model_response_event = Event(
author='agent',
invocation_id='inv-1',
)
llm_request = LlmRequest(model='mock', contents=[])
cache: dict[str, str] = {}

# Partial event
partial_response = _make_fc_response('get_weather', partial=True)
partial_event = _finalize_model_response_event(
llm_request, partial_response, model_response_event, cache,
)
partial_id = partial_event.get_function_calls()[0].id
assert partial_id.startswith('adk-')

# Final event — same function call must get the same ID
final_response = _make_fc_response('get_weather', partial=False)
final_event = _finalize_model_response_event(
llm_request, final_response, model_response_event, cache,
)
final_id = final_event.get_function_calls()[0].id
assert final_id == partial_id

def test_without_cache_ids_differ(self):
model_response_event = Event(
author='agent',
invocation_id='inv-1',
)
llm_request = LlmRequest(model='mock', contents=[])

partial_response = _make_fc_response('get_weather', partial=True)
partial_event = _finalize_model_response_event(
llm_request, partial_response, model_response_event,
)
partial_id = partial_event.get_function_calls()[0].id

final_response = _make_fc_response('get_weather', partial=False)
final_event = _finalize_model_response_event(
llm_request, final_response, model_response_event,
)
final_id = final_event.get_function_calls()[0].id

# Without cache, IDs are different (this is the bug scenario)
assert final_id != partial_id

def test_multi_function_call_streaming_preserves_all_ids(self):
model_response_event = Event(
author='agent',
invocation_id='inv-1',
)
llm_request = LlmRequest(model='mock', contents=[])
cache: dict[str, str] = {}

# Partial with two function calls
partial_response = _make_multi_fc_response(
[('search', {'q': 'weather'}), ('lookup', {'id': '42'})],
partial=True,
)
partial_event = _finalize_model_response_event(
llm_request, partial_response, model_response_event, cache,
)
partial_ids = [fc.id for fc in partial_event.get_function_calls()]

# Final with same two function calls
final_response = _make_multi_fc_response(
[('search', {'q': 'weather'}), ('lookup', {'id': '42'})],
partial=False,
)
final_event = _finalize_model_response_event(
llm_request, final_response, model_response_event, cache,
)
final_ids = [fc.id for fc in final_event.get_function_calls()]

assert partial_ids == final_ids
assert partial_ids[0] != partial_ids[1] # different calls have different IDs