Commit 261ef0b

Merge branch 'master' into ivana/recursive-guard-in-batcher
2 parents eec68c4 + b905cd3 commit 261ef0b

File tree: 2 files changed (+40, -17 lines)

sentry_sdk/integrations/anthropic.py

Lines changed: 27 additions & 13 deletions
@@ -159,7 +159,8 @@ def _collect_ai_data(
     usage: "_RecordedUsage",
     content_blocks: "list[str]",
     response_id: "str | None" = None,
-) -> "tuple[str | None, _RecordedUsage, list[str], str | None]":
+    finish_reason: "str | None" = None,
+) -> "tuple[str | None, _RecordedUsage, list[str], str | None, str | None]":
     """
     Collect model information, token usage, and collect content blocks from the AI streaming response.
     """
@@ -197,6 +198,7 @@ def _collect_ai_data(
                 usage,
                 content_blocks,
                 response_id,
+                finish_reason,
             )

     # Counterintuitive, but message_delta contains cumulative token counts :)
@@ -221,18 +223,17 @@ def _collect_ai_data(
         usage.cache_read_input_tokens = cache_read_input_tokens
         # TODO: Record event.usage.server_tool_use

-        return (
-            model,
-            usage,
-            content_blocks,
-            response_id,
-        )
+        if event.delta.stop_reason is not None:
+            finish_reason = event.delta.stop_reason
+
+        return (model, usage, content_blocks, response_id, finish_reason)

     return (
         model,
         usage,
         content_blocks,
         response_id,
+        finish_reason,
     )


@@ -411,6 +412,7 @@ def _wrap_synchronous_message_iterator(
     usage = _RecordedUsage()
     content_blocks: "list[str]" = []
     response_id = None
+    finish_reason = None

     try:
         for event in iterator:
@@ -430,12 +432,15 @@ def _wrap_synchronous_message_iterator(
                 yield event
                 continue

-            (model, usage, content_blocks, response_id) = _collect_ai_data(
-                event,
-                model,
-                usage,
-                content_blocks,
-                response_id,
+            (model, usage, content_blocks, response_id, finish_reason) = (
+                _collect_ai_data(
+                    event,
+                    model,
+                    usage,
+                    content_blocks,
+                    response_id,
+                    finish_reason,
+                )
             )
             yield event
     finally:
@@ -459,6 +464,7 @@ def _wrap_synchronous_message_iterator(
                 content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
                 finish_span=True,
                 response_id=response_id,
+                finish_reason=finish_reason,
             )


@@ -475,6 +481,7 @@ async def _wrap_asynchronous_message_iterator(
     usage = _RecordedUsage()
     content_blocks: "list[str]" = []
     response_id = None
+    finish_reason = None

     try:
         async for event in iterator:
@@ -499,12 +506,14 @@ async def _wrap_asynchronous_message_iterator(
                 usage,
                 content_blocks,
                 response_id,
+                finish_reason,
             ) = _collect_ai_data(
                 event,
                 model,
                 usage,
                 content_blocks,
                 response_id,
+                finish_reason,
             )
             yield event
     finally:
@@ -528,6 +537,7 @@ async def _wrap_asynchronous_message_iterator(
                 content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
                 finish_span=True,
                 response_id=response_id,
+                finish_reason=finish_reason,
             )


@@ -542,12 +552,15 @@ def _set_output_data(
     content_blocks: "list[Any]",
     finish_span: bool = False,
     response_id: "str | None" = None,
+    finish_reason: "str | None" = None,
 ) -> None:
     """
     Set output data for the span based on the AI response."""
     span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model)
     if response_id is not None:
         span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response_id)
+    if finish_reason is not None:
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, [finish_reason])
     if should_send_default_pii() and integration.include_prompts:
         output_messages: "dict[str, list[Any]]" = {
             "response": [],
@@ -652,6 +665,7 @@ def _sentry_patched_create_common(f: "Any", *args: "Any", **kwargs: "Any") -> "A
                 content_blocks=content_blocks,
                 finish_span=True,
                 response_id=getattr(result, "id", None),
+                finish_reason=getattr(result, "stop_reason", None),
             )
         else:
             span.set_data("unknown_response", True)

tests/integrations/anthropic/test_anthropic.py

Lines changed: 13 additions & 4 deletions
@@ -63,6 +63,7 @@ async def __call__(self, *args, **kwargs):
             role="assistant",
             content=[TextBlock(type="text", text="Hi, I'm Claude.")],
             type="message",
+            stop_reason="end_turn",
             usage=Usage(input_tokens=10, output_tokens=20),
         )

@@ -136,6 +137,7 @@ def test_nonstreaming_create_message(
     assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL"
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"]


 @pytest.mark.asyncio
@@ -258,7 +260,7 @@ def test_streaming_create_message(
             ),
             ContentBlockStopEvent(type="content_block_stop", index=0),
             MessageDeltaEvent(
-                delta=Delta(),
+                delta=Delta(stop_reason="max_tokens"),
                 usage=MessageDeltaUsage(output_tokens=10),
                 type="message_delta",
             ),
@@ -323,6 +325,7 @@ def test_streaming_create_message(
     assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL"
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"]


 @pytest.mark.parametrize(
@@ -373,7 +376,7 @@ def test_stream_messages(
             ),
             ContentBlockStopEvent(type="content_block_stop", index=0),
             MessageDeltaEvent(
-                delta=Delta(),
+                delta=Delta(stop_reason="max_tokens"),
                 usage=MessageDeltaUsage(output_tokens=10),
                 type="message_delta",
             ),
@@ -439,6 +442,7 @@ def test_stream_messages(
     assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL"
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"]


 @pytest.mark.asyncio
@@ -492,7 +496,7 @@ async def test_streaming_create_message_async(
             ),
             ContentBlockStopEvent(type="content_block_stop", index=0),
             MessageDeltaEvent(
-                delta=Delta(),
+                delta=Delta(stop_reason="max_tokens"),
                 usage=MessageDeltaUsage(output_tokens=10),
                 type="message_delta",
             ),
@@ -504,6 +508,7 @@ async def test_streaming_create_message_async(
     sentry_init(
         integrations=[AnthropicIntegration(include_prompts=include_prompts)],
         traces_sample_rate=1.0,
+        default_integrations=False,
         send_default_pii=send_default_pii,
     )
     events = capture_events()
@@ -559,6 +564,7 @@ async def test_streaming_create_message_async(
     assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL"
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"]


 @pytest.mark.asyncio
@@ -1471,14 +1477,15 @@ def test_collect_ai_data_with_input_json_delta():

     content_blocks = []

-    model, new_usage, new_content_blocks, response_id = _collect_ai_data(
+    model, new_usage, new_content_blocks, response_id, finish_reason = _collect_ai_data(
         event, model, usage, content_blocks
     )
     assert model is None
     assert new_usage.input_tokens == usage.input_tokens
     assert new_usage.output_tokens == usage.output_tokens
     assert new_content_blocks == ["test"]
     assert response_id is None
+    assert finish_reason is None


 @pytest.mark.skipif(
@@ -1766,6 +1773,7 @@ def test_nonstreaming_create_message_with_system_prompt(
     assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20
     assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"]


 @pytest.mark.asyncio
@@ -1851,6 +1859,7 @@ async def test_nonstreaming_create_message_with_system_prompt_async(
     assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20
     assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30
     assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
+    assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"]


 @pytest.mark.parametrize(
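
For the non-streaming path, the integration reads stop_reason straight off the returned Message object (finish_reason=getattr(result, "stop_reason", None)), which is what the ["end_turn"] assertions above exercise. A rough sketch of that mapping, again with a stand-in result object and the assumed string value of SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS:

# Stand-in sketch of the non-streaming case; "result" imitates the Message
# returned by messages.create(), and the key string is an assumption.
from types import SimpleNamespace

result = SimpleNamespace(id="msg_123", stop_reason="end_turn")

span_data = {}
finish_reason = getattr(result, "stop_reason", None)
if finish_reason is not None:
    span_data["gen_ai.response.finish_reasons"] = [finish_reason]

assert span_data["gen_ai.response.finish_reasons"] == ["end_turn"]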
