Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 10 additions & 9 deletions google/genai/_interactions/types/generation_config_param.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,10 @@
from __future__ import annotations

from typing import Union, Iterable
from typing_extensions import Literal, TypeAlias, TypedDict
from typing_extensions import Literal, Annotated, TypeAlias, TypedDict

from .._types import SequenceNotStr
from .._utils import PropertyInfo
from .thinking_level import ThinkingLevel
from .tool_choice_type import ToolChoiceType
from .image_config_param import ImageConfigParam
Expand All @@ -35,32 +36,32 @@
class GenerationConfigParam(TypedDict, total=False):
"""Configuration parameters for model interactions."""

image_config: ImageConfigParam
image_config: Annotated[ImageConfigParam, PropertyInfo(alias="imageConfig")]
"""Configuration for image interaction."""

max_output_tokens: int
max_output_tokens: Annotated[int, PropertyInfo(alias="maxOutputTokens")]
"""The maximum number of tokens to include in the response."""

seed: int
"""Seed used in decoding for reproducibility."""

speech_config: Iterable[SpeechConfigParam]
speech_config: Annotated[Iterable[SpeechConfigParam], PropertyInfo(alias="speechConfig")]
"""Configuration for speech interaction."""

stop_sequences: SequenceNotStr[str]
stop_sequences: Annotated[SequenceNotStr[str], PropertyInfo(alias="stopSequences")]
"""A list of character sequences that will stop output interaction."""

temperature: float
"""Controls the randomness of the output."""

thinking_level: ThinkingLevel
thinking_level: Annotated[ThinkingLevel, PropertyInfo(alias="thinkingLevel")]
"""The level of thought tokens that the model should generate."""

thinking_summaries: Literal["auto", "none"]
thinking_summaries: Annotated[Literal["auto", "none"], PropertyInfo(alias="thinkingSummaries")]
"""Whether to include thought summaries in the response."""

tool_choice: ToolChoice
tool_choice: Annotated[ToolChoice, PropertyInfo(alias="toolChoice")]
"""The tool choice for the interaction."""

top_p: float
top_p: Annotated[float, PropertyInfo(alias="topP")]
"""The maximum cumulative probability of tokens to consider when sampling."""
16 changes: 10 additions & 6 deletions google/genai/_interactions/types/interaction.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,8 @@
from .code_execution_result_content import CodeExecutionResultContent
from .mcp_server_tool_result_content import MCPServerToolResultContent

from pydantic import Field as FieldInfo

__all__ = ["Interaction", "AgentConfig", "Input"]

AgentConfig: TypeAlias = Annotated[
Expand Down Expand Up @@ -104,7 +106,7 @@ class Interaction(BaseModel):
agent: Union[str, Literal["deep-research-pro-preview-12-2025"], None] = None
"""The name of the `Agent` used for generating the interaction."""

agent_config: Optional[AgentConfig] = None
agent_config: Optional[AgentConfig] = FieldInfo(alias="agentConfig", default=None)
"""Configuration for the agent."""

input: Optional[Input] = None
Expand All @@ -116,25 +118,27 @@ class Interaction(BaseModel):
outputs: Optional[List[Content]] = None
"""Output only. Responses from the model."""

previous_interaction_id: Optional[str] = None
previous_interaction_id: Optional[str] = FieldInfo(alias="previousInteractionId", default=None)
"""The ID of the previous interaction, if any."""

response_format: Optional[object] = None
response_format: Optional[object] = FieldInfo(alias="responseFormat", default=None)
"""
Enforces that the generated response is a JSON object that complies with
the JSON schema specified in this field.
"""

response_mime_type: Optional[str] = None
response_mime_type: Optional[str] = FieldInfo(alias="responseMimeType", default=None)
"""The mime type of the response. This is required if response_format is set."""

response_modalities: Optional[List[Literal["text", "image", "audio"]]] = None
response_modalities: Optional[List[Literal["text", "image", "audio"]]] = FieldInfo(
alias="responseModalities", default=None
)
"""The requested modalities of the response (TEXT, IMAGE, AUDIO)."""

role: Optional[str] = None
"""Output only. The role of the interaction."""

system_instruction: Optional[str] = None
system_instruction: Optional[str] = FieldInfo(alias="systemInstruction", default=None)
"""System instruction for the interaction."""

tools: Optional[List[Tool]] = None
Expand Down
32 changes: 17 additions & 15 deletions google/genai/_interactions/types/interaction_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,9 @@
from __future__ import annotations

from typing import List, Union, Iterable
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict

from .._utils import PropertyInfo

from .tool_param import ToolParam
from .turn_param import TurnParam
Expand Down Expand Up @@ -59,7 +61,7 @@


class BaseCreateModelInteractionParams(TypedDict, total=False):
api_version: str
api_version: Annotated[str, PropertyInfo(alias="apiVersion")]

input: Required[Input]
"""The inputs for the interaction."""
Expand All @@ -70,28 +72,28 @@ class BaseCreateModelInteractionParams(TypedDict, total=False):
background: bool
"""Input only. Whether to run the model interaction in the background."""

generation_config: GenerationConfigParam
generation_config: Annotated[GenerationConfigParam, PropertyInfo(alias="generationConfig")]
"""Input only. Configuration parameters for the model interaction."""

previous_interaction_id: str
previous_interaction_id: Annotated[str, PropertyInfo(alias="previousInteractionId")]
"""The ID of the previous interaction, if any."""

response_format: object
response_format: Annotated[object, PropertyInfo(alias="responseFormat")]
"""
Enforces that the generated response is a JSON object that complies with
the JSON schema specified in this field.
"""

response_mime_type: str
response_mime_type: Annotated[str, PropertyInfo(alias="responseMimeType")]
"""The mime type of the response. This is required if response_format is set."""

response_modalities: List[Literal["text", "image", "audio"]]
response_modalities: Annotated[List[Literal["text", "image", "audio"]], PropertyInfo(alias="responseModalities")]
"""The requested modalities of the response (TEXT, IMAGE, AUDIO)."""

store: bool
"""Input only. Whether to store the response and request for later retrieval."""

system_instruction: str
system_instruction: Annotated[str, PropertyInfo(alias="systemInstruction")]
"""System instruction for the interaction."""

tools: Iterable[ToolParam]
Expand Down Expand Up @@ -124,39 +126,39 @@ class BaseCreateModelInteractionParams(TypedDict, total=False):


class BaseCreateAgentInteractionParams(TypedDict, total=False):
api_version: str
api_version: Annotated[str, PropertyInfo(alias="apiVersion")]

agent: Required[Union[str, Literal["deep-research-pro-preview-12-2025"]]]
"""The name of the `Agent` used for generating the interaction."""

input: Required[Input]
"""The inputs for the interaction."""

agent_config: AgentConfig
agent_config: Annotated[AgentConfig, PropertyInfo(alias="agentConfig")]
"""Configuration for the agent."""

background: bool
"""Input only. Whether to run the model interaction in the background."""

previous_interaction_id: str
previous_interaction_id: Annotated[str, PropertyInfo(alias="previousInteractionId")]
"""The ID of the previous interaction, if any."""

response_format: object
response_format: Annotated[object, PropertyInfo(alias="responseFormat")]
"""
Enforces that the generated response is a JSON object that complies with
the JSON schema specified in this field.
"""

response_mime_type: str
response_mime_type: Annotated[str, PropertyInfo(alias="responseMimeType")]
"""The mime type of the response. This is required if response_format is set."""

response_modalities: List[Literal["text", "image", "audio"]]
response_modalities: Annotated[List[Literal["text", "image", "audio"]], PropertyInfo(alias="responseModalities")]
"""The requested modalities of the response (TEXT, IMAGE, AUDIO)."""

store: bool
"""Input only. Whether to store the response and request for later retrieval."""

system_instruction: str
system_instruction: Annotated[str, PropertyInfo(alias="systemInstruction")]
"""System instruction for the interaction."""

tools: Iterable[ToolParam]
Expand Down
30 changes: 20 additions & 10 deletions google/genai/_interactions/types/usage.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@
from typing import List, Optional
from typing_extensions import Literal

from pydantic import Field as FieldInfo

from .._models import BaseModel

__all__ = [
Expand Down Expand Up @@ -72,35 +74,43 @@ class ToolUseTokensByModality(BaseModel):
class Usage(BaseModel):
"""Statistics on the interaction request's token usage."""

cached_tokens_by_modality: Optional[List[CachedTokensByModality]] = None
cached_tokens_by_modality: Optional[List[CachedTokensByModality]] = FieldInfo(
alias="cachedTokensByModality", default=None
)
"""A breakdown of cached token usage by modality."""

input_tokens_by_modality: Optional[List[InputTokensByModality]] = None
input_tokens_by_modality: Optional[List[InputTokensByModality]] = FieldInfo(
alias="inputTokensByModality", default=None
)
"""A breakdown of input token usage by modality."""

output_tokens_by_modality: Optional[List[OutputTokensByModality]] = None
output_tokens_by_modality: Optional[List[OutputTokensByModality]] = FieldInfo(
alias="outputTokensByModality", default=None
)
"""A breakdown of output token usage by modality."""

tool_use_tokens_by_modality: Optional[List[ToolUseTokensByModality]] = None
tool_use_tokens_by_modality: Optional[List[ToolUseTokensByModality]] = FieldInfo(
alias="toolUseTokensByModality", default=None
)
"""A breakdown of tool-use token usage by modality."""

total_cached_tokens: Optional[int] = None
total_cached_tokens: Optional[int] = FieldInfo(alias="totalCachedTokens", default=None)
"""Number of tokens in the cached part of the prompt (the cached content)."""

total_input_tokens: Optional[int] = None
total_input_tokens: Optional[int] = FieldInfo(alias="totalInputTokens", default=None)
"""Number of tokens in the prompt (context)."""

total_output_tokens: Optional[int] = None
total_output_tokens: Optional[int] = FieldInfo(alias="totalOutputTokens", default=None)
"""Total number of tokens across all the generated responses."""

total_thought_tokens: Optional[int] = None
total_thought_tokens: Optional[int] = FieldInfo(alias="totalThoughtTokens", default=None)
"""Number of tokens of thoughts for thinking models."""

total_tokens: Optional[int] = None
total_tokens: Optional[int] = FieldInfo(alias="totalTokens", default=None)
"""
Total token count for the interaction request (prompt + responses + other
internal tokens).
"""

total_tool_use_tokens: Optional[int] = None
total_tool_use_tokens: Optional[int] = FieldInfo(alias="totalToolUseTokens", default=None)
"""Number of tokens present in tool-use prompt(s)."""
79 changes: 79 additions & 0 deletions google/genai/tests/interactions/test_persistence.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@

# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from unittest import mock
import pytest
from httpx import Request, Response
from ... import Client

def test_create_interaction_persistence_request_body():
    """Snake_case request params must be serialized as camelCase JSON keys."""
    client = Client(api_key='fake-key')

    with mock.patch("httpx.Client.send") as fake_send:
        # Stub the transport so no real HTTP request leaves the process.
        fake_send.return_value = Response(
            200,
            request=Request('POST', ''),
            content=json.dumps({"id": "new-id"}).encode(),
        )

        client.interactions.create(
            model="gemini-2.0-flash",
            input="Hello",
            previous_interaction_id="old-id",
            generation_config={"max_output_tokens": 100},
        )

        fake_send.assert_called_once()
        outgoing = fake_send.call_args[0][0]
        payload = json.loads(outgoing.read())

    # Snake_case inputs should appear under their camelCase aliases on the wire...
    assert payload["previousInteractionId"] == "old-id"
    assert payload["generationConfig"]["maxOutputTokens"] == 100
    # ...and the original snake_case key must not leak into the body.
    assert "previous_interaction_id" not in payload

def test_create_interaction_persistence_response_parsing():
    """camelCase response fields must map onto snake_case model attributes."""
    client = Client(api_key='fake-key')

    # Canned server reply using the wire (camelCase) field names.
    server_json = {
        "id": "new-id",
        "previousInteractionId": "old-id",
        "status": "completed",
        "created": "2024-03-22T18:11:19Z",
        "updated": "2024-03-22T18:11:19Z",
        "usage": {
            "totalInputTokens": 10,
            "totalOutputTokens": 20,
        },
    }

    with mock.patch("httpx.Client.send") as fake_send:
        fake_send.return_value = Response(
            200,
            request=Request('POST', ''),
            content=json.dumps(server_json).encode(),
        )

        interaction = client.interactions.create(
            model="gemini-2.0-flash",
            input="Hello",
        )

    # Every camelCase wire field should be reachable via its snake_case attribute.
    assert interaction.id == "new-id"
    assert interaction.previous_interaction_id == "old-id"
    assert interaction.usage.total_input_tokens == 10
    assert interaction.usage.total_output_tokens == 20