Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion frontend/src/components/Chat/ChatInputArea.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -439,7 +439,7 @@ const ChatInputArea = forwardRef<ChatInputAreaHandle, ChatInputAreaProps>(functi
/>
</div>
<div className={styles.columnRight}>
{activeTarget && activeTarget.supports_multi_turn === false && (
{activeTarget && (activeTarget.capabilities?.supports_multi_turn ?? activeTarget.supports_multi_turn) === false && (
<Tooltip
content="This target does not track conversation history — each turn is sent independently."
relationship="description"
Expand Down
4 changes: 2 additions & 2 deletions frontend/src/components/Chat/ChatWindow.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -446,7 +446,7 @@ export default function ChatWindow({
}
}, [attackResultId])

const singleTurnLimitReached = activeTarget?.supports_multi_turn === false && messages.some(m => m.role === 'user')
const singleTurnLimitReached = (activeTarget?.capabilities?.supports_multi_turn ?? activeTarget?.supports_multi_turn) === false && messages.some(m => m.role === 'user')

// Operator locking: if the loaded attack's operator differs from the current
// user's operator label, the conversation should be read-only.
Expand Down Expand Up @@ -561,7 +561,7 @@ export default function ChatWindow({
onBranchConversation={attackResultId && activeConversationId ? handleBranchConversation : undefined}
onBranchAttack={activeTarget && activeConversationId ? handleBranchAttack : undefined}
isLoading={isLoadingAttack || isLoadingMessages || awaitingConversationLoad}
isSingleTurn={activeTarget?.supports_multi_turn === false}
isSingleTurn={(activeTarget?.capabilities?.supports_multi_turn ?? activeTarget?.supports_multi_turn) === false}
isOperatorLocked={isOperatorLocked}
isCrossTarget={isCrossTargetLocked}
noTargetSelected={!activeTarget}
Expand Down
6 changes: 6 additions & 0 deletions frontend/src/components/Config/TargetTable.styles.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,12 @@ export const useTargetTableStyles = makeStyles({
tableLayout: 'fixed',
width: '100%',
},
stickyHeader: {
position: 'sticky',
top: 0,
backgroundColor: tokens.colorNeutralBackground1,
zIndex: 1,
},
activeRow: {
backgroundColor: tokens.colorBrandBackground2,
},
Expand Down
38 changes: 37 additions & 1 deletion frontend/src/components/Config/TargetTable.test.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,28 @@ const sampleTargets: TargetInstance[] = [
target_type: 'OpenAIChatTarget',
endpoint: 'https://api.openai.com',
model_name: 'gpt-4',
capabilities: {
supports_multi_turn: true,
supports_multi_message_pieces: true,
supports_json_schema: true,
supports_json_output: true,
supports_editable_history: true,
supports_system_prompt: true,
},
},
{
target_registry_name: 'azure_image_dalle',
target_type: 'AzureImageTarget',
endpoint: 'https://azure.openai.com',
model_name: 'dall-e-3',
capabilities: {
supports_multi_turn: false,
supports_multi_message_pieces: false,
supports_json_schema: false,
supports_json_output: false,
supports_editable_history: false,
supports_system_prompt: false,
},
},
{
target_registry_name: 'text_target_basic',
Expand Down Expand Up @@ -58,7 +74,7 @@ describe('TargetTable', () => {
expect(screen.getAllByText('TextTarget').length).toBeGreaterThanOrEqual(1)
})

it('should display Type, Model, Endpoint and Parameters columns', () => {
it('should display Type, Model, Endpoint, capability columns and Parameters columns', () => {
render(
<TestWrapper>
<TargetTable {...defaultProps} />
Expand All @@ -68,6 +84,12 @@ describe('TargetTable', () => {
expect(screen.getByText('Type')).toBeInTheDocument()
expect(screen.getByText('Model')).toBeInTheDocument()
expect(screen.getByText('Endpoint')).toBeInTheDocument()
expect(screen.getByText('Multi-turn')).toBeInTheDocument()
expect(screen.getByText('Multi-piece')).toBeInTheDocument()
expect(screen.getByText('JSON Schema')).toBeInTheDocument()
expect(screen.getByText('JSON Output')).toBeInTheDocument()
expect(screen.getByText('Edit History')).toBeInTheDocument()
expect(screen.getByText('System Prompt')).toBeInTheDocument()
expect(screen.getByText('Parameters')).toBeInTheDocument()
})

Expand Down Expand Up @@ -151,10 +173,24 @@ describe('TargetTable', () => {
</TestWrapper>
)

// Dashes for model, endpoint, and 6 capability columns (all unknown)
const dashes = screen.getAllByText('—')
expect(dashes.length).toBeGreaterThanOrEqual(2)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Similar to the comment on 191, should this also be toBeGreaterThanOrEqual(9) / toBe(9)? Also, does the comment above miss the additional dash for params?

})

it('should show dash for capability columns when capabilities is absent', () => {
  render(
    <TestWrapper>
      <TargetTable {...defaultProps} targets={[sampleTargets[2]]} />
    </TestWrapper>
  )

  // TextTarget has no capabilities — all 6 capability cells render a dash.
  // model (—) + endpoint (—) + 6 capabilities (—) + params (—) = 9
  // Assertion matches the count documented above (was >= 8, which let a
  // missing dash slip through; flagged in review).
  const dashes = screen.getAllByText('—')
  expect(dashes.length).toBeGreaterThanOrEqual(9)
})

it('should display target_specific_params when present', () => {
const targetWithParams: TargetInstance[] = [
{
Expand Down
49 changes: 47 additions & 2 deletions frontend/src/components/Config/TargetTable.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ import {
Tooltip,
Select,
} from '@fluentui/react-components'
import { CheckmarkRegular } from '@fluentui/react-icons'
import { CheckmarkRegular, CheckmarkCircleFilled, DismissCircleFilled } from '@fluentui/react-icons'
import type { TargetInstance } from '../../types'
import { useTargetTableStyles } from './TargetTable.styles'

Expand All @@ -39,6 +39,27 @@ function formatParams(params?: Record<string, unknown> | null): string {
return parts.join('\n')
}

/** Capability column definitions with tooltip descriptions. */
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Really like these tooltips! could go either way on this but should we add tooltips for the other fields in this view too (type, model, endpoint, parameters)?

// Capability column definitions with tooltip descriptions.
// `key` values must match the field names of TargetCapabilitiesInfo (types/index.ts);
// array order here is the left-to-right column order in the table.
const CAPABILITY_COLUMNS = [
  { key: 'supports_multi_turn', label: 'Multi-turn', tooltip: 'Supports multi-turn conversation history' },
  { key: 'supports_multi_message_pieces', label: 'Multi-piece', tooltip: 'Supports multiple message pieces in a single request' },
  { key: 'supports_json_schema', label: 'JSON Schema', tooltip: 'Supports constraining output to a JSON schema' },
  { key: 'supports_json_output', label: 'JSON Output', tooltip: 'Supports JSON output format' },
  { key: 'supports_editable_history', label: 'Edit History', tooltip: 'Allows attack history to be modified' },
  { key: 'supports_system_prompt', label: 'System Prompt', tooltip: 'Supports system prompts' },
] as const

/** Render a capability indicator: ✓ (green) / ✗ (red) / — (unknown). */
function CapabilityCell({ value }: { value: boolean | undefined }) {
if (value === undefined) {
return <Text size={200}>—</Text>
}
if (value) {
return <CheckmarkCircleFilled style={{ color: 'green', fontSize: '18px' }} />
}
return <DismissCircleFilled style={{ color: 'red', fontSize: '18px' }} />
Comment on lines +57 to +60
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think that we should be hardcoding these values for color and fontSize. Looking at a comparable element in AttackTable.tsx it uses tokens.colorPaletteGreenForeground1 and tokens.colorPaletteRedForeground1. We can probably either drop the explicit font size or use a token for this as well. AttackTable lets the icons inherit, so we could do that same thing here.

Worth noting as well that AttackTable uses Regular instead of Filled icons, not necessarily a concern but it is a bit of insistency between tables.

}

/** Render the model cell with a tooltip when underlying model differs. */
function ModelCell({ target }: { target: TargetInstance }) {
const displayName = target.model_name || '—'
Expand All @@ -62,6 +83,21 @@ function ModelCell({ target }: { target: TargetInstance }) {
return <Text size={200}>{displayName}</Text>
}

/** Render capability cells for a target. */
function CapabilityCells({ target }: { target: TargetInstance }) {
return (
<>
{CAPABILITY_COLUMNS.map(({ key }) => (
<TableCell key={key} style={{ width: '80px', textAlign: 'center' }}>
<CapabilityCell
value={target.capabilities?.[key]}
/>
</TableCell>
))}
</>
)
}

export default function TargetTable({ targets, activeTarget, onSetActiveTarget }: TargetTableProps) {
const styles = useTargetTableStyles()
const [typeFilter, setTypeFilter] = useState('')
Expand Down Expand Up @@ -99,6 +135,7 @@ export default function TargetTable({ targets, activeTarget, onSetActiveTarget }
{activeTarget.endpoint || '—'}
</Text>
</TableCell>
<CapabilityCells target={activeTarget} />
<TableCell style={{ width: '200px' }}>
<Text size={200} className={styles.paramsCell}>
{formatParams(activeTarget.target_specific_params) || '—'}
Expand Down Expand Up @@ -126,12 +163,19 @@ export default function TargetTable({ targets, activeTarget, onSetActiveTarget }
)}

<Table aria-label="Target instances" className={styles.table}>
<TableHeader>
<TableHeader className={styles.stickyHeader}>
<TableRow>
<TableHeaderCell style={{ width: '120px' }} />
<TableHeaderCell style={{ width: '200px' }}>Type</TableHeaderCell>
<TableHeaderCell style={{ width: '180px' }}>Model</TableHeaderCell>
<TableHeaderCell style={{ minWidth: '300px' }}>Endpoint</TableHeaderCell>
{CAPABILITY_COLUMNS.map(({ key, label, tooltip }) => (
<TableHeaderCell key={key} style={{ width: '80px', textAlign: 'center' }}>
<Tooltip content={tooltip} relationship="description">
<Text size={200} style={{ cursor: 'help' }}>{label}</Text>
</Tooltip>
</TableHeaderCell>
))}
<TableHeaderCell style={{ width: '200px' }}>Parameters</TableHeaderCell>
</TableRow>
</TableHeader>
Expand Down Expand Up @@ -167,6 +211,7 @@ export default function TargetTable({ targets, activeTarget, onSetActiveTarget }
{target.endpoint || '—'}
</Text>
</TableCell>
<CapabilityCells target={target} />
<TableCell>
<Text size={200} className={styles.paramsCell}>
{formatParams(target.target_specific_params) || '—'}
Expand Down
10 changes: 10 additions & 0 deletions frontend/src/types/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,15 @@ export interface PaginationInfo {

// --- Targets ---

export interface TargetCapabilitiesInfo {
supports_multi_turn: boolean
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit is the font size for this table purposefully different? It's slightly smaller looking than the other headers

supports_multi_message_pieces: boolean
supports_json_schema: boolean
supports_json_output: boolean
supports_editable_history: boolean
supports_system_prompt: boolean
}

export interface TargetInstance {
target_registry_name: string
target_type: string
Expand All @@ -63,6 +72,7 @@ export interface TargetInstance {
top_p?: number | null
max_requests_per_minute?: number | null
supports_multi_turn?: boolean
capabilities?: TargetCapabilitiesInfo | null
target_specific_params?: Record<string, unknown> | null
}

Expand Down
17 changes: 15 additions & 2 deletions pyrit/backend/mappers/target_mappers.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
Target mappers – domain → DTO translation for target-related models.
"""

from pyrit.backend.models.targets import TargetInstance
from pyrit.backend.models.targets import TargetCapabilitiesInfo, TargetInstance
from pyrit.prompt_target import PromptTarget


Expand All @@ -27,6 +27,7 @@ def target_object_to_instance(target_registry_name: str, target_obj: PromptTarge
params = identifier.params

# Keys that are extracted as top-level TargetInstance fields
# or are internal-only (target_configuration is the verbose capabilities blob).
extracted_keys = {
"endpoint",
"model_name",
Expand All @@ -36,13 +37,24 @@ def target_object_to_instance(target_registry_name: str, target_obj: PromptTarge
"max_requests_per_minute",
"supports_multi_turn",
"target_specific_params",
"target_configuration",
}

# Collect remaining params as target_specific_params so the frontend can display them
explicit_specific = params.get("target_specific_params") or {}
extra = {k: v for k, v in params.items() if k not in extracted_keys and v is not None}
combined_specific = {**extra, **explicit_specific} or None

caps = target_obj.capabilities
capabilities = TargetCapabilitiesInfo(
supports_multi_turn=caps.supports_multi_turn,
supports_multi_message_pieces=caps.supports_multi_message_pieces,
supports_json_schema=caps.supports_json_schema,
supports_json_output=caps.supports_json_output,
supports_editable_history=caps.supports_editable_history,
supports_system_prompt=caps.supports_system_prompt,
)

return TargetInstance(
target_registry_name=target_registry_name,
target_type=identifier.class_name,
Expand All @@ -52,6 +64,7 @@ def target_object_to_instance(target_registry_name: str, target_obj: PromptTarge
temperature=params.get("temperature"),
top_p=params.get("top_p"),
max_requests_per_minute=params.get("max_requests_per_minute"),
supports_multi_turn=target_obj.capabilities.supports_multi_turn,
supports_multi_turn=caps.supports_multi_turn,
capabilities=capabilities,
target_specific_params=combined_specific,
)
18 changes: 18 additions & 0 deletions pyrit/backend/models/targets.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,23 @@
from pyrit.backend.models.common import PaginationInfo


class TargetCapabilitiesInfo(BaseModel):
    """Structured capability flags for a target instance.

    Populated by ``target_object_to_instance`` from the target object's
    ``capabilities`` attribute; every flag is required, so a present
    ``capabilities`` object is always fully populated.
    """

    supports_multi_turn: bool = Field(..., description="Whether the target supports multi-turn conversation history")
    supports_multi_message_pieces: bool = Field(
        ..., description="Whether the target supports multiple message pieces in a single request"
    )
    supports_json_schema: bool = Field(
        ..., description="Whether the target supports constraining output to a JSON schema"
    )
    supports_json_output: bool = Field(..., description="Whether the target supports JSON output format")
    supports_editable_history: bool = Field(
        ..., description="Whether the target allows the attack history to be modified"
    )
    supports_system_prompt: bool = Field(..., description="Whether the target supports system prompts")


class TargetInstance(BaseModel):
"""
A runtime target instance.
Expand All @@ -37,6 +54,7 @@ class TargetInstance(BaseModel):
top_p: Optional[float] = Field(None, description="Top-p parameter for generation")
max_requests_per_minute: Optional[int] = Field(None, description="Maximum requests per minute")
supports_multi_turn: bool = Field(True, description="Whether the target supports multi-turn conversation history")
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we intentionally keep this top-level instance of supports_multi_turn or should this one be removed in favor of the one that will be captured in the nested capabilities field?

capabilities: Optional[TargetCapabilitiesInfo] = Field(None, description="Structured capability flags")
target_specific_params: Optional[dict[str, Any]] = Field(None, description="Additional target-specific parameters")


Expand Down
67 changes: 67 additions & 0 deletions tests/unit/backend/test_mappers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1257,6 +1257,73 @@ def test_chat_target_extra_params_preserved(self) -> None:
assert result.target_specific_params["seed"] == 42
assert result.target_specific_params["max_completion_tokens"] == 2048

def test_capabilities_populated_from_target_object(self) -> None:
    """Test that all 6 capability fields are populated from target_obj.capabilities."""
    # A deliberately mixed set of True/False flags so a swapped or dropped
    # field in the mapper cannot go unnoticed.
    target_obj = MagicMock(spec=PromptTarget)
    target_obj.capabilities = TargetCapabilities(
        supports_multi_turn=True,
        supports_multi_message_pieces=True,
        supports_json_schema=False,
        supports_json_output=True,
        supports_editable_history=False,
        supports_system_prompt=True,
    )
    mock_identifier = ComponentIdentifier(
        class_name="OpenAIChatTarget",
        class_module="pyrit.prompt_target",
        params={"endpoint": "https://api.openai.com", "model_name": "gpt-4"},
    )
    target_obj.get_identifier.return_value = mock_identifier

    result = target_object_to_instance("t-1", target_obj)

    # Each flag must be passed through unchanged.
    assert result.capabilities is not None
    assert result.capabilities.supports_multi_turn is True
    assert result.capabilities.supports_multi_message_pieces is True
    assert result.capabilities.supports_json_schema is False
    assert result.capabilities.supports_json_output is True
    assert result.capabilities.supports_editable_history is False
    assert result.capabilities.supports_system_prompt is True

def test_capabilities_matches_legacy_supports_multi_turn(self) -> None:
    """Test that legacy supports_multi_turn field matches capabilities.supports_multi_turn."""
    # Only supports_multi_turn is set explicitly; the remaining
    # TargetCapabilities flags take their defaults.
    target_obj = MagicMock(spec=PromptTarget)
    target_obj.capabilities = TargetCapabilities(supports_multi_turn=False)
    mock_identifier = ComponentIdentifier(
        class_name="TextTarget",
        class_module="pyrit.prompt_target",
    )
    target_obj.get_identifier.return_value = mock_identifier

    result = target_object_to_instance("t-1", target_obj)

    # The legacy top-level flag and the nested capabilities flag must agree,
    # since both are derived from the same source in the mapper.
    assert result.supports_multi_turn is False
    assert result.capabilities is not None
    assert result.capabilities.supports_multi_turn is False
    assert result.supports_multi_turn == result.capabilities.supports_multi_turn

def test_target_configuration_excluded_from_target_specific_params(self) -> None:
    """Test that the verbose target_configuration blob is filtered from target_specific_params."""
    target_obj = MagicMock(spec=PromptTarget)
    target_obj.capabilities = TargetCapabilities(supports_multi_turn=True)
    mock_identifier = ComponentIdentifier(
        class_name="OpenAIChatTarget",
        class_module="pyrit.prompt_target",
        params={
            "endpoint": "https://api.openai.com",
            "model_name": "gpt-4",
            # target_configuration duplicates the capabilities data and is
            # expected to be filtered out by the mapper's extracted_keys set.
            "target_configuration": {"capabilities": {"supports_multi_turn": True}},
            "reasoning_effort": "high",
        },
    )
    target_obj.get_identifier.return_value = mock_identifier

    result = target_object_to_instance("t-1", target_obj)

    # Non-extracted extras (reasoning_effort) survive; the blob does not.
    assert result.target_specific_params is not None
    assert "target_configuration" not in result.target_specific_params
    assert result.target_specific_params["reasoning_effort"] == "high"


# ============================================================================
# Converter Mapper Tests
Expand Down
Loading