Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/strands/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -1025,7 +1025,7 @@ async def _convert_prompt_to_messages(self, prompt: AgentInput) -> Messages:
# Check if all item in input list are dictionaries
elif all(isinstance(item, dict) for item in prompt):
# Check if all items are messages
if all(all(key in item for key in Message.__annotations__.keys()) for item in prompt):
if all(all(key in item for key in Message.__required_keys__) for item in prompt):
# Messages input - add all messages to conversation
messages = cast(Messages, prompt)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -68,4 +68,7 @@ def recover_message_on_max_tokens_reached(message: Message) -> Message:
}
)

return {"content": valid_content, "role": message["role"]}
recovered: Message = {"content": valid_content, "role": message["role"]}
if "metadata" in message:
recovered["metadata"] = message["metadata"]
return recovered
7 changes: 7 additions & 0 deletions src/strands/event_loop/event_loop.py
Original file line number Diff line number Diff line change
Expand Up @@ -354,6 +354,13 @@ async def _handle_model_execution(
stop_reason, message, usage, metrics = event["stop"]
invocation_state.setdefault("request_state", {})

# Attach metadata to the assistant message immediately so it's
# available to all downstream consumers (hooks, events, state).
message["metadata"] = {
"usage": usage,
"metrics": metrics,
}

after_model_call_event = AfterModelCallEvent(
agent=agent,
invocation_state=invocation_state,
Expand Down
3 changes: 3 additions & 0 deletions src/strands/event_loop/streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -488,6 +488,9 @@ async def stream_messages(
logger.debug("model=<%s> | streaming messages", model)

messages = _normalize_messages(messages)
# Whitelist only role and content before sending to the model provider.
# This ensures metadata (and any future non-model fields) never leak to providers.
messages = [Message(role=msg["role"], content=msg["content"]) for msg in messages]
start_time = time.time()

chunks = model.stream(
Expand Down
4 changes: 1 addition & 3 deletions src/strands/telemetry/tracer.py
Original file line number Diff line number Diff line change
Expand Up @@ -527,9 +527,7 @@ def start_event_loop_cycle_span(
event_loop_cycle_id = str(invocation_state.get("event_loop_cycle_id"))
parent_span = parent_span if parent_span else invocation_state.get("event_loop_parent_span")

attributes: dict[str, AttributeValue] = self._get_common_attributes(
operation_name="execute_event_loop_cycle"
)
attributes: dict[str, AttributeValue] = self._get_common_attributes(operation_name="execute_event_loop_cycle")
attributes["event_loop.cycle_id"] = event_loop_cycle_id

if custom_trace_attributes:
Expand Down
30 changes: 29 additions & 1 deletion src/strands/types/content.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,12 @@
- Bedrock docs: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_Types_Amazon_Bedrock_Runtime.html
"""

from typing import Literal
from typing import Any, Literal

from typing_extensions import NotRequired, TypedDict

from .citations import CitationsContentBlock
from .event_loop import Metrics, Usage
from .media import DocumentContent, ImageContent, VideoContent
from .tools import ToolResult, ToolUse

Expand Down Expand Up @@ -177,17 +178,44 @@ class ContentBlockStop(TypedDict):
"""


class MessageMetadata(TypedDict):
    """Optional metadata carried on a message.

    This data is never sent to model providers — it is explicitly stripped
    before model calls — but it is persisted alongside the message in
    session storage.

    Every key is optional (declared ``NotRequired``, which is equivalent to
    ``total=False`` for key-requirement semantics).

    Attributes:
        usage: Token usage reported by the model response.
        metrics: Performance metrics reported by the model response.
        custom: Arbitrary user/framework metadata (e.g. compression provenance).
    """

    usage: NotRequired[Usage]
    metrics: NotRequired[Metrics]
    custom: NotRequired[dict[str, Any]]


class Message(TypedDict):
    """A message in a conversation with the agent.

    ``content`` and ``role`` are required; ``metadata`` is optional and is
    stripped before the message is sent to a model provider.

    Attributes:
        content: The ordered list of content blocks making up the message.
        role: The role of the message sender (e.g. user or assistant).
        metadata: Optional metadata (usage, metrics, custom fields); never
            forwarded to model providers.
    """

    content: list[ContentBlock]
    role: Role
    metadata: NotRequired[MessageMetadata]


# Type alias for a full conversation history: an ordered list of Message dicts.
Messages = list[Message]
"""A list of messages representing a conversation."""


def get_message_metadata(message: Message) -> MessageMetadata:
    """Return the metadata attached to ``message``, or an empty dict if absent.

    Individual fields (``usage``, ``metrics``, ``custom``) may themselves be
    missing from the returned mapping; use ``.get()`` to access them safely.
    """
    try:
        return message["metadata"]
    except KeyError:
        return {}
13 changes: 9 additions & 4 deletions tests/strands/agent/hooks/test_agent_events.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,7 @@ async def test_stream_e2e_success(alist):
{"toolUse": {"input": {}, "name": "normal_tool", "toolUseId": "123"}},
],
"role": "assistant",
"metadata": ANY,
}
},
{
Expand Down Expand Up @@ -205,6 +206,7 @@ async def test_stream_e2e_success(alist):
{"toolUse": {"input": {}, "name": "async_tool", "toolUseId": "1234"}},
],
"role": "assistant",
"metadata": ANY,
}
},
{
Expand Down Expand Up @@ -263,6 +265,7 @@ async def test_stream_e2e_success(alist):
{"toolUse": {"input": {}, "name": "streaming_tool", "toolUseId": "12345"}},
],
"role": "assistant",
"metadata": ANY,
}
},
{
Expand Down Expand Up @@ -307,11 +310,11 @@ async def test_stream_e2e_success(alist):
},
{"event": {"contentBlockStop": {}}},
{"event": {"messageStop": {"stopReason": "end_turn"}}},
{"message": {"content": [{"text": "I invoked the tools!"}], "role": "assistant"}},
{"message": {"content": [{"text": "I invoked the tools!"}], "role": "assistant", "metadata": ANY}},
{
"result": AgentResult(
stop_reason="end_turn",
message={"content": [{"text": "I invoked the tools!"}], "role": "assistant"},
message={"content": [{"text": "I invoked the tools!"}], "role": "assistant", "metadata": ANY},
metrics=ANY,
state={},
),
Expand Down Expand Up @@ -371,11 +374,11 @@ async def test_stream_e2e_throttle_and_redact(alist, mock_sleep):
},
{"event": {"contentBlockStop": {}}},
{"event": {"messageStop": {"stopReason": "guardrail_intervened"}}},
{"message": {"content": [{"text": "INPUT BLOCKED!"}], "role": "assistant"}},
{"message": {"content": [{"text": "INPUT BLOCKED!"}], "role": "assistant", "metadata": ANY}},
{
"result": AgentResult(
stop_reason="guardrail_intervened",
message={"content": [{"text": "INPUT BLOCKED!"}], "role": "assistant"},
message={"content": [{"text": "INPUT BLOCKED!"}], "role": "assistant", "metadata": ANY},
metrics=ANY,
state={},
),
Expand Down Expand Up @@ -442,6 +445,7 @@ async def test_stream_e2e_reasoning_redacted_content(alist):
{"text": "Response with redacted reasoning"},
],
"role": "assistant",
"metadata": ANY,
}
},
{
Expand All @@ -453,6 +457,7 @@ async def test_stream_e2e_reasoning_redacted_content(alist):
{"text": "Response with redacted reasoning"},
],
"role": "assistant",
"metadata": ANY,
},
metrics=ANY,
state={},
Expand Down
16 changes: 11 additions & 5 deletions tests/strands/agent/test_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -336,7 +336,7 @@ def test_agent__call__(
"stop_reason": result.stop_reason,
}
exp_result = {
"message": {"content": [{"text": "test text"}], "role": "assistant"},
"message": {"content": [{"text": "test text"}], "role": "assistant", "metadata": unittest.mock.ANY},
"state": {},
"stop_reason": "end_turn",
}
Expand Down Expand Up @@ -781,6 +781,7 @@ def test_agent__call__callback(mock_model, agent, callback_handler, agenerator):
{"reasoningContent": {"reasoningText": {"text": "value", "signature": "value"}}},
{"text": "value"},
],
"metadata": unittest.mock.ANY,
},
),
unittest.mock.call(
Expand All @@ -793,6 +794,7 @@ def test_agent__call__callback(mock_model, agent, callback_handler, agenerator):
{"reasoningContent": {"reasoningText": {"text": "value", "signature": "value"}}},
{"text": "value"},
],
"metadata": unittest.mock.ANY,
},
metrics=unittest.mock.ANY,
state={},
Expand All @@ -817,7 +819,7 @@ async def test_agent__call__in_async_context(mock_model, agent, agenerator):
result = agent("test")

tru_message = result.message
exp_message = {"content": [{"text": "abc"}], "role": "assistant"}
exp_message = {"content": [{"text": "abc"}], "role": "assistant", "metadata": unittest.mock.ANY}
assert tru_message == exp_message


Expand All @@ -837,7 +839,7 @@ async def test_agent_invoke_async(mock_model, agent, agenerator):
result = await agent.invoke_async("test")

tru_message = result.message
exp_message = {"content": [{"text": "abc"}], "role": "assistant"}
exp_message = {"content": [{"text": "abc"}], "role": "assistant", "metadata": unittest.mock.ANY}
assert tru_message == exp_message


Expand Down Expand Up @@ -1128,7 +1130,7 @@ async def test_stream_async_multi_modal_input(mock_model, agent, agenerator, ali
tru_message = agent.messages
exp_message = [
{"content": prompt, "role": "user"},
{"content": [{"text": "I see text and an image"}], "role": "assistant"},
{"content": [{"text": "I see text and an image"}], "role": "assistant", "metadata": unittest.mock.ANY},
]
assert tru_message == exp_message

Expand Down Expand Up @@ -1966,7 +1968,11 @@ def shell(command: str):
}

# And that it continued to the LLM call
assert agent.messages[-1] == {"content": [{"text": "I invoked a tool!"}], "role": "assistant"}
assert agent.messages[-1] == {
"content": [{"text": "I invoked a tool!"}],
"role": "assistant",
"metadata": unittest.mock.ANY,
}


def test_agent_string_system_prompt():
Expand Down
3 changes: 2 additions & 1 deletion tests/strands/agent/test_agent_cancellation.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import asyncio
import threading
from unittest.mock import ANY

import pytest

Expand Down Expand Up @@ -31,7 +32,7 @@ async def test_agent_cancel_before_invocation():
result = await agent.invoke_async("Hello")

assert result.stop_reason == "cancelled"
assert result.message == {"role": "assistant", "content": [{"text": "Cancelled by user"}]}
assert result.message == {"role": "assistant", "content": [{"text": "Cancelled by user"}], "metadata": ANY}


@pytest.mark.asyncio
Expand Down
6 changes: 4 additions & 2 deletions tests/strands/agent/test_agent_hooks.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,7 @@ def test_agent__call__hooks(agent, hook_provider, agent_tool, mock_model, tool_u
message={
"content": [{"toolUse": tool_use}],
"role": "assistant",
"metadata": ANY,
},
stop_reason="tool_use",
),
Expand All @@ -199,7 +200,7 @@ def test_agent__call__hooks(agent, hook_provider, agent_tool, mock_model, tool_u
agent=agent,
invocation_state=ANY,
stop_response=AfterModelCallEvent.ModelStopResponse(
message=mock_model.agent_responses[1],
message={"role": "assistant", "content": [{"text": "I invoked a tool!"}], "metadata": ANY},
stop_reason="end_turn",
),
exception=None,
Expand Down Expand Up @@ -246,6 +247,7 @@ async def test_agent_stream_async_hooks(agent, hook_provider, agent_tool, mock_m
message={
"content": [{"toolUse": tool_use}],
"role": "assistant",
"metadata": ANY,
},
stop_reason="tool_use",
),
Expand All @@ -272,7 +274,7 @@ async def test_agent_stream_async_hooks(agent, hook_provider, agent_tool, mock_m
agent=agent,
invocation_state=ANY,
stop_response=AfterModelCallEvent.ModelStopResponse(
message=mock_model.agent_responses[1],
message={"role": "assistant", "content": [{"text": "I invoked a tool!"}], "metadata": ANY},
stop_reason="end_turn",
),
exception=None,
Expand Down
17 changes: 9 additions & 8 deletions tests/strands/event_loop/test_event_loop.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,7 @@ async def test_event_loop_cycle_text_response(
tru_stop_reason, tru_message, _, tru_request_state, _, _ = events[-1]["stop"]

exp_stop_reason = "end_turn"
exp_message = {"role": "assistant", "content": [{"text": "test text"}]}
exp_message = {"role": "assistant", "content": [{"text": "test text"}], "metadata": ANY}
exp_request_state = {}

assert tru_stop_reason == exp_stop_reason and tru_message == exp_message and tru_request_state == exp_request_state
Expand Down Expand Up @@ -225,7 +225,7 @@ async def test_event_loop_cycle_text_response_throttling(
tru_stop_reason, tru_message, _, tru_request_state, _, _ = events[-1]["stop"]

exp_stop_reason = "end_turn"
exp_message = {"role": "assistant", "content": [{"text": "test text"}]}
exp_message = {"role": "assistant", "content": [{"text": "test text"}], "metadata": ANY}
exp_request_state = {}

assert tru_stop_reason == exp_stop_reason and tru_message == exp_message and tru_request_state == exp_request_state
Expand Down Expand Up @@ -264,7 +264,7 @@ async def test_event_loop_cycle_exponential_backoff(

# Verify the final response
assert tru_stop_reason == "end_turn"
assert tru_message == {"role": "assistant", "content": [{"text": "test text"}]}
assert tru_message == {"role": "assistant", "content": [{"text": "test text"}], "metadata": ANY}
assert tru_request_state == {}

# Verify that sleep was called with increasing delays
Expand Down Expand Up @@ -354,7 +354,7 @@ async def test_event_loop_cycle_tool_result(
tru_stop_reason, tru_message, _, tru_request_state, _, _ = events[-1]["stop"]

exp_stop_reason = "end_turn"
exp_message = {"role": "assistant", "content": [{"text": "test text"}]}
exp_message = {"role": "assistant", "content": [{"text": "test text"}], "metadata": ANY}
exp_request_state = {}

assert tru_stop_reason == exp_stop_reason and tru_message == exp_message and tru_request_state == exp_request_state
Expand Down Expand Up @@ -389,7 +389,6 @@ async def test_event_loop_cycle_tool_result(
},
],
},
{"role": "assistant", "content": [{"text": "test text"}]},
],
tool_registry.get_all_tool_specs(),
"p1",
Expand Down Expand Up @@ -484,6 +483,7 @@ async def test_event_loop_cycle_stop(
}
}
],
"metadata": ANY,
}
exp_request_state = {"stop_event_loop": True}

Expand Down Expand Up @@ -946,14 +946,14 @@ async def test_event_loop_cycle_exception_model_hooks(mock_sleep, agent, model,
agent=agent,
invocation_state=ANY,
stop_response=AfterModelCallEvent.ModelStopResponse(
message={"content": [{"text": "test text"}], "role": "assistant"}, stop_reason="end_turn"
message={"content": [{"text": "test text"}], "role": "assistant", "metadata": ANY}, stop_reason="end_turn"
),
exception=None,
)

# Final message
assert next(events) == MessageAddedEvent(
agent=agent, message={"content": [{"text": "test text"}], "role": "assistant"}
agent=agent, message={"content": [{"text": "test text"}], "role": "assistant", "metadata": ANY}
)


Expand Down Expand Up @@ -997,6 +997,7 @@ def interrupt_callback(event):
},
],
"role": "assistant",
"metadata": ANY,
},
},
"interrupts": {
Expand Down Expand Up @@ -1131,7 +1132,7 @@ async def test_invalid_tool_names_adds_tool_uses(agent, model, alist):
# ensure that we got end_turn and not tool_use
assert events[-1] == EventLoopStopEvent(
stop_reason="end_turn",
message={"content": [{"text": "I invoked a tool!"}], "role": "assistant"},
message={"content": [{"text": "I invoked a tool!"}], "role": "assistant", "metadata": ANY},
metrics=ANY,
request_state={},
)
Expand Down
Loading
Loading