Skip to content

Commit c6e7d6b

Browse files
omkute10 and xuanyang15
authored and committed
feat(tools): Add debug logging to VertexAiSearchTool
Merge #3284 **Problem:** When debugging agents that utilize the `VertexAiSearchTool`, it's currently difficult to inspect the specific configuration parameters (datastore ID, engine ID, filter, max_results, etc.) being passed to the underlying Vertex AI Search API via the `LlmRequest`. This lack of visibility can hinder troubleshooting efforts related to tool configuration. **Solution:** This PR enhances the `VertexAiSearchTool` by adding a **debug-level log statement** within the `process_llm_request` method. This log precisely records the parameters being used for the Vertex AI Search configuration just before it's appended to the `LlmRequest`. This provides developers with crucial visibility into the tool's runtime behavior when debug logging is enabled, significantly improving the **debuggability** of agents using this tool. Corresponding unit tests were updated to rigorously verify this new logging output using `caplog`. Additionally, minor fixes were made to the tests to resolve Pydantic validation errors. Co-authored-by: Xuan Yang <xygoogle@google.com> COPYBARA_INTEGRATE_REVIEW=#3284 from omkute10:feat/add-logging-vertex-search-tool 199c12b PiperOrigin-RevId: 836419886
1 parent b331d97 commit c6e7d6b

File tree

2 files changed

+170
-12
lines changed

2 files changed

+170
-12
lines changed

src/google/adk/tools/vertex_ai_search_tool.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
from __future__ import annotations
1616

17+
import logging
1718
from typing import Optional
1819
from typing import TYPE_CHECKING
1920

@@ -25,6 +26,8 @@
2526
from .base_tool import BaseTool
2627
from .tool_context import ToolContext
2728

29+
logger = logging.getLogger('google_adk.' + __name__)
30+
2831
if TYPE_CHECKING:
2932
from ..models import LlmRequest
3033

@@ -102,6 +105,30 @@ async def process_llm_request(
102105
)
103106
llm_request.config = llm_request.config or types.GenerateContentConfig()
104107
llm_request.config.tools = llm_request.config.tools or []
108+
109+
# Format data_store_specs concisely for logging
110+
if self.data_store_specs:
111+
spec_ids = [
112+
spec.data_store.split('/')[-1] if spec.data_store else 'unnamed'
113+
for spec in self.data_store_specs
114+
]
115+
specs_info = (
116+
f'{len(self.data_store_specs)} spec(s): [{", ".join(spec_ids)}]'
117+
)
118+
else:
119+
specs_info = None
120+
121+
logger.debug(
122+
'Adding Vertex AI Search tool config to LLM request: '
123+
'datastore=%s, engine=%s, filter=%s, max_results=%s, '
124+
'data_store_specs=%s',
125+
self.data_store_id,
126+
self.search_engine_id,
127+
self.filter,
128+
self.max_results,
129+
specs_info,
130+
)
131+
105132
llm_request.config.tools.append(
106133
types.Tool(
107134
retrieval=types.Retrieval(

tests/unittests/tools/test_vertex_ai_search_tool.py

Lines changed: 143 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15+
import logging
16+
1517
from google.adk.agents.invocation_context import InvocationContext
1618
from google.adk.agents.sequential_agent import SequentialAgent
1719
from google.adk.models.llm_request import LlmRequest
@@ -24,6 +26,10 @@
2426
from google.genai import types
2527
import pytest
2628

29+
VERTEX_SEARCH_TOOL_LOGGER_NAME = (
30+
'google_adk.google.adk.tools.vertex_ai_search_tool'
31+
)
32+
2733

2834
async def _create_tool_context() -> ToolContext:
2935
session_service = InMemorySessionService()
@@ -121,12 +127,34 @@ def test_init_with_data_store_id(self):
121127
tool = VertexAiSearchTool(data_store_id='test_data_store')
122128
assert tool.data_store_id == 'test_data_store'
123129
assert tool.search_engine_id is None
130+
assert tool.data_store_specs is None
124131

125132
def test_init_with_search_engine_id(self):
126133
"""Test initialization with search engine ID."""
127134
tool = VertexAiSearchTool(search_engine_id='test_search_engine')
128135
assert tool.search_engine_id == 'test_search_engine'
129136
assert tool.data_store_id is None
137+
assert tool.data_store_specs is None
138+
139+
def test_init_with_engine_and_specs(self):
140+
"""Test initialization with search engine ID and specs."""
141+
specs = [
142+
types.VertexAISearchDataStoreSpec(
143+
dataStore=(
144+
'projects/p/locations/l/collections/c/dataStores/spec_store'
145+
)
146+
)
147+
]
148+
engine_id = (
149+
'projects/p/locations/l/collections/c/engines/test_search_engine'
150+
)
151+
tool = VertexAiSearchTool(
152+
search_engine_id=engine_id,
153+
data_store_specs=specs,
154+
)
155+
assert tool.search_engine_id == engine_id
156+
assert tool.data_store_id is None
157+
assert tool.data_store_specs == specs
130158

131159
def test_init_with_neither_raises_error(self):
132160
"""Test that initialization without either ID raises ValueError."""
@@ -146,10 +174,34 @@ def test_init_with_both_raises_error(self):
146174
data_store_id='test_data_store', search_engine_id='test_search_engine'
147175
)
148176

177+
def test_init_with_specs_but_no_engine_raises_error(self):
178+
"""Test that specs without engine ID raises ValueError."""
179+
specs = [
180+
types.VertexAISearchDataStoreSpec(
181+
dataStore=(
182+
'projects/p/locations/l/collections/c/dataStores/spec_store'
183+
)
184+
)
185+
]
186+
with pytest.raises(
187+
ValueError,
188+
match=(
189+
'search_engine_id must be specified if data_store_specs is'
190+
' specified'
191+
),
192+
):
193+
VertexAiSearchTool(
194+
data_store_id='test_data_store', data_store_specs=specs
195+
)
196+
149197
@pytest.mark.asyncio
150-
async def test_process_llm_request_with_simple_gemini_model(self):
198+
async def test_process_llm_request_with_simple_gemini_model(self, caplog):
151199
"""Test processing LLM request with simple Gemini model name."""
152-
tool = VertexAiSearchTool(data_store_id='test_data_store')
200+
caplog.set_level(logging.DEBUG, logger=VERTEX_SEARCH_TOOL_LOGGER_NAME)
201+
202+
tool = VertexAiSearchTool(
203+
data_store_id='test_data_store', filter='f', max_results=5
204+
)
153205
tool_context = await _create_tool_context()
154206

155207
llm_request = LlmRequest(
@@ -162,17 +214,56 @@ async def test_process_llm_request_with_simple_gemini_model(self):
162214

163215
assert llm_request.config.tools is not None
164216
assert len(llm_request.config.tools) == 1
165-
assert llm_request.config.tools[0].retrieval is not None
166-
assert llm_request.config.tools[0].retrieval.vertex_ai_search is not None
217+
retrieval_tool = llm_request.config.tools[0]
218+
assert retrieval_tool.retrieval is not None
219+
assert retrieval_tool.retrieval.vertex_ai_search is not None
220+
assert (
221+
retrieval_tool.retrieval.vertex_ai_search.datastore == 'test_data_store'
222+
)
223+
assert retrieval_tool.retrieval.vertex_ai_search.engine is None
224+
assert retrieval_tool.retrieval.vertex_ai_search.filter == 'f'
225+
assert retrieval_tool.retrieval.vertex_ai_search.max_results == 5
226+
227+
# Verify debug log
228+
debug_records = [
229+
r
230+
for r in caplog.records
231+
if 'Adding Vertex AI Search tool config' in r.message
232+
]
233+
assert len(debug_records) == 1
234+
log_message = debug_records[0].getMessage()
235+
assert 'datastore=test_data_store' in log_message
236+
assert 'engine=None' in log_message
237+
assert 'filter=f' in log_message
238+
assert 'max_results=5' in log_message
239+
assert 'data_store_specs=None' in log_message
167240

168241
@pytest.mark.asyncio
169-
async def test_process_llm_request_with_path_based_gemini_model(self):
242+
async def test_process_llm_request_with_path_based_gemini_model(self, caplog):
170243
"""Test processing LLM request with path-based Gemini model name."""
171-
tool = VertexAiSearchTool(data_store_id='test_data_store')
244+
caplog.set_level(logging.DEBUG, logger=VERTEX_SEARCH_TOOL_LOGGER_NAME)
245+
246+
specs = [
247+
types.VertexAISearchDataStoreSpec(
248+
dataStore=(
249+
'projects/p/locations/l/collections/c/dataStores/spec_store'
250+
)
251+
)
252+
]
253+
engine_id = 'projects/p/locations/l/collections/c/engines/test_engine'
254+
tool = VertexAiSearchTool(
255+
search_engine_id=engine_id,
256+
data_store_specs=specs,
257+
filter='f2',
258+
max_results=10,
259+
)
172260
tool_context = await _create_tool_context()
173261

174262
llm_request = LlmRequest(
175-
model='projects/265104255505/locations/us-central1/publishers/google/models/gemini-2.0-flash-001',
263+
model=(
264+
'projects/265104255505/locations/us-central1/publishers/'
265+
'google/models/gemini-2.0-flash-001'
266+
),
176267
config=types.GenerateContentConfig(),
177268
)
178269

@@ -182,8 +273,28 @@ async def test_process_llm_request_with_path_based_gemini_model(self):
182273

183274
assert llm_request.config.tools is not None
184275
assert len(llm_request.config.tools) == 1
185-
assert llm_request.config.tools[0].retrieval is not None
186-
assert llm_request.config.tools[0].retrieval.vertex_ai_search is not None
276+
retrieval_tool = llm_request.config.tools[0]
277+
assert retrieval_tool.retrieval is not None
278+
assert retrieval_tool.retrieval.vertex_ai_search is not None
279+
assert retrieval_tool.retrieval.vertex_ai_search.datastore is None
280+
assert retrieval_tool.retrieval.vertex_ai_search.engine == engine_id
281+
assert retrieval_tool.retrieval.vertex_ai_search.filter == 'f2'
282+
assert retrieval_tool.retrieval.vertex_ai_search.max_results == 10
283+
assert retrieval_tool.retrieval.vertex_ai_search.data_store_specs == specs
284+
285+
# Verify debug log
286+
debug_records = [
287+
r
288+
for r in caplog.records
289+
if 'Adding Vertex AI Search tool config' in r.message
290+
]
291+
assert len(debug_records) == 1
292+
log_message = debug_records[0].getMessage()
293+
assert 'datastore=None' in log_message
294+
assert f'engine={engine_id}' in log_message
295+
assert 'filter=f2' in log_message
296+
assert 'max_results=10' in log_message
297+
assert 'data_store_specs=1 spec(s): [spec_store]' in log_message
187298

188299
@pytest.mark.asyncio
189300
async def test_process_llm_request_with_gemini_1_and_other_tools_raises_error(
@@ -291,9 +402,11 @@ async def test_process_llm_request_with_path_based_non_gemini_model_raises_error
291402

292403
@pytest.mark.asyncio
293404
async def test_process_llm_request_with_gemini_2_and_other_tools_succeeds(
294-
self,
405+
self, caplog
295406
):
296407
"""Test that Gemini 2.x with other tools succeeds."""
408+
caplog.set_level(logging.DEBUG, logger=VERTEX_SEARCH_TOOL_LOGGER_NAME)
409+
297410
tool = VertexAiSearchTool(data_store_id='test_data_store')
298411
tool_context = await _create_tool_context()
299412

@@ -316,5 +429,23 @@ async def test_process_llm_request_with_gemini_2_and_other_tools_succeeds(
316429
assert llm_request.config.tools is not None
317430
assert len(llm_request.config.tools) == 2
318431
assert llm_request.config.tools[0] == existing_tool
319-
assert llm_request.config.tools[1].retrieval is not None
320-
assert llm_request.config.tools[1].retrieval.vertex_ai_search is not None
432+
retrieval_tool = llm_request.config.tools[1]
433+
assert retrieval_tool.retrieval is not None
434+
assert retrieval_tool.retrieval.vertex_ai_search is not None
435+
assert (
436+
retrieval_tool.retrieval.vertex_ai_search.datastore == 'test_data_store'
437+
)
438+
439+
# Verify debug log
440+
debug_records = [
441+
r
442+
for r in caplog.records
443+
if 'Adding Vertex AI Search tool config' in r.message
444+
]
445+
assert len(debug_records) == 1
446+
log_message = debug_records[0].getMessage()
447+
assert 'datastore=test_data_store' in log_message
448+
assert 'engine=None' in log_message
449+
assert 'filter=None' in log_message
450+
assert 'max_results=None' in log_message
451+
assert 'data_store_specs=None' in log_message

0 commit comments

Comments (0)