Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion contributing/samples/gepa/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,6 @@
from tau_bench.types import EnvRunResult
from tau_bench.types import RunConfig
import tau_bench_agent as tau_bench_agent_lib

import utils


Expand Down
1 change: 0 additions & 1 deletion contributing/samples/gepa/run_experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@
from absl import flags
import experiment
from google.genai import types

import utils

_OUTPUT_DIR = flags.DEFINE_string(
Expand Down
48 changes: 37 additions & 11 deletions src/google/adk/models/lite_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -1491,6 +1491,29 @@ def _message_to_generate_content_response(
)


def _enforce_closed_schema(schema: dict):
if not isinstance(schema, dict):
return

if schema.get("type") == "object":
schema.setdefault("additionalProperties", False)

for prop in schema.get("properties", {}).values():
_enforce_closed_schema(prop)

if "items" in schema:
_enforce_closed_schema(schema["items"])

if "$defs" in schema:
for def_schema in schema["$defs"].values():
_enforce_closed_schema(def_schema)

for key in ("anyOf", "oneOf", "allOf"):
if key in schema:
for sub_schema in schema[key]:
_enforce_closed_schema(sub_schema)


def _to_litellm_response_format(
response_schema: types.SchemaUnion,
model: str,
Expand Down Expand Up @@ -1551,14 +1574,9 @@ def _to_litellm_response_format(

# OpenAI-compatible format (default) per LiteLLM docs:
# https://docs.litellm.ai/docs/completion/json_mode
if (
isinstance(schema_dict, dict)
and schema_dict.get("type") == "object"
and "additionalProperties" not in schema_dict
):
if isinstance(schema_dict, dict):
# OpenAI structured outputs require explicit additionalProperties: false.
schema_dict = dict(schema_dict)
schema_dict["additionalProperties"] = False
_enforce_closed_schema(schema_dict)

return {
"type": "json_schema",
Expand Down Expand Up @@ -1793,7 +1811,12 @@ def _warn_gemini_via_litellm(model_string: str) -> None:
# Check if warning should be suppressed via environment variable
if os.environ.get(
"ADK_SUPPRESS_GEMINI_LITELLM_WARNINGS", ""
).strip().lower() in ("1", "true", "yes", "on"):
).strip().lower() in (
"1",
"true",
"yes",
"on",
):
return

warnings.warn(
Expand Down Expand Up @@ -1897,9 +1920,12 @@ async def generate_content_async(
logger.debug(_build_request_log(llm_request))

effective_model = llm_request.model or self.model
messages, tools, response_format, generation_params = (
await _get_completion_inputs(llm_request, effective_model)
)
(
messages,
tools,
response_format,
generation_params,
) = await _get_completion_inputs(llm_request, effective_model)
normalized_messages = _normalize_ollama_chat_messages(
messages,
model=effective_model,
Expand Down
155 changes: 135 additions & 20 deletions tests/unittests/models/test_litellm.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,13 @@
# limitations under the License

import contextlib
from enum import Enum
import json
import logging
import os
import sys
import tempfile
from typing import List
import unittest
from unittest.mock import ANY
from unittest.mock import AsyncMock
Expand Down Expand Up @@ -222,8 +224,25 @@
]


class _EnumType(str, Enum):
  """String-valued enum used to validate ``$ref`` handling.

  Tests reference this type to check that enum fields, which appear in the
  generated JSON schema as ``{"$ref": "#/$defs/_EnumType"}``, survive
  response-format post-processing unchanged.
  """

  A = "A"
  B = "B"


class _InnerObject(BaseModel):
  """Innermost nested model; exercises ``$defs`` recursion in schema tests."""

  # Appears in the generated JSON schema as {"$ref": "#/$defs/_EnumType"}.
  enum_value: _EnumType
  detail: str = Field(description="Some detail")


class _OuterObject(BaseModel):
  """Mid-level nested model; exercises array ``items`` schema handling."""

  # Serialized as {"type": "array", "items": {"$ref": "#/$defs/_InnerObject"}}.
  items: List[_InnerObject]
  name: str = Field(description="Name field")


class _StructuredOutput(BaseModel):
  """Top-level model used across the response-format tests in this file."""

  value: int = Field(description="Value to emit")
  # Nested list forces _OuterObject/_InnerObject into the schema's $defs.
  objects: List[_OuterObject]


class _ModelDumpOnly:
Expand Down Expand Up @@ -325,8 +344,18 @@ def test_to_litellm_response_format_uses_json_schema_for_openai_model():
assert "json_schema" in formatted
assert formatted["json_schema"]["name"] == "_StructuredOutput"
assert formatted["json_schema"]["strict"] is True
assert formatted["json_schema"]["schema"]["additionalProperties"] is False
assert "additionalProperties" in formatted["json_schema"]["schema"]

schema = formatted["json_schema"]["schema"]
assert schema["additionalProperties"] is False
assert schema["$defs"]["_OuterObject"]["additionalProperties"] is False
assert schema["$defs"]["_InnerObject"]["additionalProperties"] is False

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


def test_to_litellm_response_format_uses_response_schema_for_gemini_model():
Expand All @@ -337,7 +366,19 @@ def test_to_litellm_response_format_uses_response_schema_for_gemini_model():

assert formatted["type"] == "json_object"
assert "response_schema" in formatted
assert formatted["response_schema"] == _StructuredOutput.model_json_schema()

schema = formatted["response_schema"]
assert schema == _StructuredOutput.model_json_schema()
assert "additionalProperties" not in schema
assert "additionalProperties" not in schema["$defs"]["_OuterObject"]
assert "additionalProperties" not in schema["$defs"]["_InnerObject"]

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


def test_to_litellm_response_format_uses_response_schema_for_vertex_gemini():
Expand All @@ -348,7 +389,19 @@ def test_to_litellm_response_format_uses_response_schema_for_vertex_gemini():

assert formatted["type"] == "json_object"
assert "response_schema" in formatted
assert formatted["response_schema"] == _StructuredOutput.model_json_schema()

schema = formatted["response_schema"]
assert schema == _StructuredOutput.model_json_schema()
assert "additionalProperties" not in schema
assert "additionalProperties" not in schema["$defs"]["_OuterObject"]
assert "additionalProperties" not in schema["$defs"]["_InnerObject"]

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


def test_to_litellm_response_format_uses_json_schema_for_azure_openai():
Expand All @@ -359,10 +412,22 @@ def test_to_litellm_response_format_uses_json_schema_for_azure_openai():

assert formatted["type"] == "json_schema"
assert "json_schema" in formatted
assert "schema" in formatted["json_schema"]

assert formatted["json_schema"]["name"] == "_StructuredOutput"
assert formatted["json_schema"]["strict"] is True
assert formatted["json_schema"]["schema"]["additionalProperties"] is False
assert "additionalProperties" in formatted["json_schema"]["schema"]

schema = formatted["json_schema"]["schema"]
assert schema["additionalProperties"] is False
assert schema["$defs"]["_OuterObject"]["additionalProperties"] is False
assert schema["$defs"]["_InnerObject"]["additionalProperties"] is False

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


def test_to_litellm_response_format_uses_json_schema_for_anthropic():
Expand All @@ -375,8 +440,18 @@ def test_to_litellm_response_format_uses_json_schema_for_anthropic():
assert "json_schema" in formatted
assert formatted["json_schema"]["name"] == "_StructuredOutput"
assert formatted["json_schema"]["strict"] is True
assert formatted["json_schema"]["schema"]["additionalProperties"] is False
assert "additionalProperties" in formatted["json_schema"]["schema"]

schema = formatted["json_schema"]["schema"]
assert schema["additionalProperties"] is False
assert schema["$defs"]["_OuterObject"]["additionalProperties"] is False
assert schema["$defs"]["_InnerObject"]["additionalProperties"] is False

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


def test_to_litellm_response_format_with_dict_schema_for_openai():
Expand All @@ -391,7 +466,9 @@ def test_to_litellm_response_format_with_dict_schema_for_openai():
assert formatted["type"] == "json_schema"
assert formatted["json_schema"]["name"] == "response"
assert formatted["json_schema"]["strict"] is True
assert formatted["json_schema"]["schema"]["additionalProperties"] is False

schema = formatted["json_schema"]["schema"]
assert schema["additionalProperties"] is False


async def test_get_completion_inputs_uses_openai_format_for_openai_model():
Expand All @@ -409,9 +486,18 @@ async def test_get_completion_inputs_uses_openai_format_for_openai_model():
assert "json_schema" in response_format
assert response_format["json_schema"]["name"] == "_StructuredOutput"
assert response_format["json_schema"]["strict"] is True
assert (
response_format["json_schema"]["schema"]["additionalProperties"] is False
)

schema = response_format["json_schema"]["schema"]
assert schema["additionalProperties"] is False
assert schema["$defs"]["_OuterObject"]["additionalProperties"] is False
assert schema["$defs"]["_InnerObject"]["additionalProperties"] is False

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


async def test_get_completion_inputs_uses_gemini_format_for_gemini_model():
Expand All @@ -428,6 +514,18 @@ async def test_get_completion_inputs_uses_gemini_format_for_gemini_model():
assert response_format["type"] == "json_object"
assert "response_schema" in response_format

schema = response_format["response_schema"]
assert "additionalProperties" not in schema
assert "additionalProperties" not in schema["$defs"]["_OuterObject"]
assert "additionalProperties" not in schema["$defs"]["_InnerObject"]

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


async def test_get_completion_inputs_uses_passed_model_for_response_format():
"""Test that _get_completion_inputs uses the passed model parameter for response format.
Expand All @@ -449,9 +547,18 @@ async def test_get_completion_inputs_uses_passed_model_for_response_format():
assert "json_schema" in response_format
assert response_format["json_schema"]["name"] == "_StructuredOutput"
assert response_format["json_schema"]["strict"] is True
assert (
response_format["json_schema"]["schema"]["additionalProperties"] is False
)

schema = response_format["json_schema"]["schema"]
assert schema["additionalProperties"] is False
assert schema["$defs"]["_OuterObject"]["additionalProperties"] is False
assert schema["$defs"]["_InnerObject"]["additionalProperties"] is False

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


async def test_get_completion_inputs_uses_passed_model_for_gemini_format():
Expand All @@ -473,6 +580,18 @@ async def test_get_completion_inputs_uses_passed_model_for_gemini_format():
assert response_format["type"] == "json_object"
assert "response_schema" in response_format

schema = response_format["response_schema"]
assert "additionalProperties" not in schema
assert "additionalProperties" not in schema["$defs"]["_OuterObject"]
assert "additionalProperties" not in schema["$defs"]["_InnerObject"]

enum_ref = schema["$defs"]["_InnerObject"]["properties"]["enum_value"]
assert enum_ref == {"$ref": "#/$defs/_EnumType"}

items_ref = schema["$defs"]["_OuterObject"]["properties"]["items"]
assert items_ref["type"] == "array"
assert items_ref["items"] == {"$ref": "#/$defs/_InnerObject"}


@pytest.mark.asyncio
async def test_get_completion_inputs_inserts_missing_tool_results():
Expand Down Expand Up @@ -879,7 +998,6 @@ def completion(self, model, messages, tools, stream, **kwargs):

@pytest.mark.asyncio
async def test_generate_content_async(mock_acompletion, lite_llm_instance):

async for response in lite_llm_instance.generate_content_async(
LLM_REQUEST_WITH_FUNCTION_DECLARATION
):
Expand Down Expand Up @@ -1069,7 +1187,6 @@ def test_append_fallback_user_content_ignores_function_response_parts():
def test_maybe_append_user_content(
lite_llm_instance, llm_request, expected_output
):

lite_llm_instance._maybe_append_user_content(llm_request)

assert len(llm_request.contents) == expected_output
Expand Down Expand Up @@ -3074,7 +3191,6 @@ async def test_completion_with_drop_params(mock_completion, mock_client):
async def test_generate_content_async_stream(
mock_completion, lite_llm_instance
):

mock_completion.return_value = iter(STREAMING_MODEL_RESPONSE)

responses = [
Expand Down Expand Up @@ -3173,7 +3289,6 @@ async def test_generate_content_async_stream_sets_finish_reason(
async def test_generate_content_async_stream_with_usage_metadata(
mock_completion, lite_llm_instance
):

streaming_model_response_with_usage_metadata = [
*STREAMING_MODEL_RESPONSE,
ModelResponseStream(
Expand Down