diff --git a/src/assets/__tests__/__snapshots__/assets.snapshot.test.ts.snap b/src/assets/__tests__/__snapshots__/assets.snapshot.test.ts.snap
index a5ba8136..ed954e84 100644
--- a/src/assets/__tests__/__snapshots__/assets.snapshot.test.ts.snap
+++ b/src/assets/__tests__/__snapshots__/assets.snapshot.test.ts.snap
@@ -1357,7 +1357,7 @@ def load_model() -> LLM:
     # CrewAI requires OPENAI_API_KEY env var (ignores api_key parameter)
     os.environ["OPENAI_API_KEY"] = api_key
     return LLM(
-        model="openai/gpt-4o",
+        model="openai/gpt-4.1",
         api_key=api_key
     )
 {{/if}}
@@ -1397,7 +1397,7 @@ def load_model() -> LLM:
     # CrewAI requires GEMINI_API_KEY env var (ignores api_key parameter)
     os.environ["GEMINI_API_KEY"] = api_key
     return LLM(
-        model="gemini/gemini-2.0-flash",
+        model="gemini/gemini-2.5-flash",
         api_key=api_key
     )
 {{/if}}
@@ -1954,7 +1954,7 @@ def _get_api_key() -> str:
 def load_model() -> ChatOpenAI:
     """Get authenticated OpenAI model client."""
     return ChatOpenAI(
-        model="gpt-4o",
+        model="gpt-4.1",
         api_key=_get_api_key()
     )
 {{/if}}
@@ -1991,7 +1991,7 @@ def _get_api_key() -> str:
 def load_model() -> ChatGoogleGenerativeAI:
     """Get authenticated Gemini model client."""
     return ChatGoogleGenerativeAI(
-        model="gemini-2.0-flash",
+        model="gemini-2.5-flash",
         api_key=_get_api_key()
     )
 {{/if}}
@@ -2155,10 +2155,11 @@ async def main(query):
     try:
         async with mcp_server as server:
             active_servers = [server] if server else []
-            # Currently defaults to GPT-4.1
-            # https://openai.github.io/openai-agents-python/models/
             agent = Agent(
-                name="{{ name }}", mcp_servers=active_servers, tools=[add_numbers]
+                name="{{ name }}",
+                model="gpt-4.1",
+                mcp_servers=active_servers,
+                tools=[add_numbers]
             )
             result = await Runner.run(agent, query)
             return result
@@ -2536,7 +2537,7 @@ def load_model() -> OpenAIModel:
     """Get authenticated OpenAI model client."""
     return OpenAIModel(
         client_args={"api_key": _get_api_key()},
-        model_id="gpt-4o",
+        model_id="gpt-4.1",
     )
 {{/if}}
 {{#if (eq modelProvider "Gemini")}}
@@ -2574,7 +2575,7 @@ def load_model() -> GeminiModel:
     """Get authenticated Gemini model client."""
     return GeminiModel(
         client_args={"api_key": _get_api_key()},
-        model_id="gemini-2.0-flash",
+        model_id="gemini-2.5-flash",
     )
 {{/if}}
 "
diff --git a/src/assets/python/crewai/base/model/load.py b/src/assets/python/crewai/base/model/load.py
index 4b08ae53..4e834e42 100644
--- a/src/assets/python/crewai/base/model/load.py
+++ b/src/assets/python/crewai/base/model/load.py
@@ -87,7 +87,7 @@ def load_model() -> LLM:
     # CrewAI requires OPENAI_API_KEY env var (ignores api_key parameter)
     os.environ["OPENAI_API_KEY"] = api_key
     return LLM(
-        model="openai/gpt-4o",
+        model="openai/gpt-4.1",
         api_key=api_key
     )
 {{/if}}
@@ -127,7 +127,7 @@ def load_model() -> LLM:
     # CrewAI requires GEMINI_API_KEY env var (ignores api_key parameter)
     os.environ["GEMINI_API_KEY"] = api_key
     return LLM(
-        model="gemini/gemini-2.0-flash",
+        model="gemini/gemini-2.5-flash",
         api_key=api_key
     )
 {{/if}}
diff --git a/src/assets/python/langchain_langgraph/base/model/load.py b/src/assets/python/langchain_langgraph/base/model/load.py
index 03982c8d..b8f2d71e 100644
--- a/src/assets/python/langchain_langgraph/base/model/load.py
+++ b/src/assets/python/langchain_langgraph/base/model/load.py
@@ -80,7 +80,7 @@ def _get_api_key() -> str:
 def load_model() -> ChatOpenAI:
     """Get authenticated OpenAI model client."""
     return ChatOpenAI(
-        model="gpt-4o",
+        model="gpt-4.1",
         api_key=_get_api_key()
     )
 {{/if}}
@@ -117,7 +117,7 @@ def _get_api_key() -> str:
 def load_model() -> ChatGoogleGenerativeAI:
     """Get authenticated Gemini model client."""
     return ChatGoogleGenerativeAI(
-        model="gemini-2.0-flash",
+        model="gemini-2.5-flash",
         api_key=_get_api_key()
     )
 {{/if}}
diff --git a/src/assets/python/openaiagents/base/main.py b/src/assets/python/openaiagents/base/main.py
index b39c896b..e6b0f0a8 100644
--- a/src/assets/python/openaiagents/base/main.py
+++ b/src/assets/python/openaiagents/base/main.py
@@ -26,10 +26,11 @@ async def main(query):
     try:
         async with mcp_server as server:
             active_servers = [server] if server else []
-            # Currently defaults to GPT-4.1
-            # https://openai.github.io/openai-agents-python/models/
             agent = Agent(
-                name="{{ name }}", mcp_servers=active_servers, tools=[add_numbers]
+                name="{{ name }}",
+                model="gpt-4.1",
+                mcp_servers=active_servers,
+                tools=[add_numbers]
             )
             result = await Runner.run(agent, query)
             return result
diff --git a/src/assets/python/strands/base/model/load.py b/src/assets/python/strands/base/model/load.py
index 95649937..8954269e 100644
--- a/src/assets/python/strands/base/model/load.py
+++ b/src/assets/python/strands/base/model/load.py
@@ -80,7 +80,7 @@ def load_model() -> OpenAIModel:
     """Get authenticated OpenAI model client."""
     return OpenAIModel(
         client_args={"api_key": _get_api_key()},
-        model_id="gpt-4o",
+        model_id="gpt-4.1",
     )
 {{/if}}
 {{#if (eq modelProvider "Gemini")}}
@@ -118,6 +118,6 @@ def load_model() -> GeminiModel:
     """Get authenticated Gemini model client."""
     return GeminiModel(
         client_args={"api_key": _get_api_key()},
-        model_id="gemini-2.0-flash",
+        model_id="gemini-2.5-flash",
     )
 {{/if}}
diff --git a/src/cli/commands/dev/command.tsx b/src/cli/commands/dev/command.tsx
index 63ed6b14..87f5daba 100644
--- a/src/cli/commands/dev/command.tsx
+++ b/src/cli/commands/dev/command.tsx
@@ -58,8 +58,10 @@ export const registerDev = (program: Command) => {
 
     // Determine which agent/port to invoke
     let invokePort = port;
+    let targetAgent = invokeProject?.agents[0];
    if (opts.agent && invokeProject) {
       invokePort = getAgentPort(invokeProject, opts.agent, port);
+      targetAgent = invokeProject.agents.find(a => a.name === opts.agent);
     } else if (invokeProject && invokeProject.agents.length > 1 && !opts.agent) {
       const names = invokeProject.agents.map(a => a.name).join(', ');
       console.error(`Error: Multiple agents found. Use --agent to specify which one.`);
@@ -67,6 +69,11 @@
       process.exit(1);
     }
 
+    // Show model info if available
+    if (targetAgent?.modelProvider) {
+      console.log(`Provider: ${targetAgent.modelProvider}`);
+    }
+
     await invokeDevServer(invokePort, opts.invoke, opts.stream ?? false);
     return;
   }
@@ -129,8 +136,13 @@
     console.log(`Port ${basePort} in use, using ${actualPort}`);
   }
 
+  // Get provider info from agent config
+  const targetAgent = project.agents.find(a => a.name === config.agentName);
+  const providerInfo = targetAgent?.modelProvider ?? '(see agent code)';
+
   console.log(`Starting dev server...`);
   console.log(`Agent: ${config.agentName}`);
+  console.log(`Provider: ${providerInfo}`);
   console.log(`Server: http://localhost:${actualPort}/invocations`);
   console.log(`Log: ${logger.getRelativeLogPath()}`);
   console.log(`Press Ctrl+C to stop\n`);
diff --git a/src/cli/commands/invoke/action.ts b/src/cli/commands/invoke/action.ts
index 8d37bafe..19c8e6a5 100644
--- a/src/cli/commands/invoke/action.ts
+++ b/src/cli/commands/invoke/action.ts
@@ -78,6 +78,9 @@ export async function handleInvoke(context: InvokeContext, options: InvokeOption
     return { success: false, error: 'No prompt provided. Usage: agentcore invoke "your prompt"' };
   }
 
+  // Get provider info if available
+  const providerInfo = agentSpec.modelProvider;
+
   // Create logger for this invocation
   const logger = new InvokeLogger({
     agentName: agentSpec.name,
@@ -113,6 +116,7 @@ export async function handleInvoke(context: InvokeContext, options: InvokeOption
         targetName: selectedTargetName,
         response: fullResponse,
         logFilePath: logger.logFilePath,
+        providerInfo,
       };
     } catch (err) {
      logger.logError(err, 'invoke streaming failed');
@@ -136,5 +140,6 @@ export async function handleInvoke(context: InvokeContext, options: InvokeOption
     targetName: selectedTargetName,
     response: response.content,
     logFilePath: logger.logFilePath,
+    providerInfo,
   };
 }
diff --git a/src/cli/commands/invoke/command.tsx b/src/cli/commands/invoke/command.tsx
index 415f124f..36e014c0 100644
--- a/src/cli/commands/invoke/command.tsx
+++ b/src/cli/commands/invoke/command.tsx
@@ -60,7 +60,10 @@ async function handleInvokeCLI(options: InvokeOptions): Promise<void> {
       console.error(`\nLog: ${result.logFilePath}`);
     }
   } else {
-    // Non-streaming, non-json: print response or error
+    // Non-streaming, non-json: print provider info and response or error
+    if (result.providerInfo) {
+      console.error(`Provider: ${result.providerInfo}`);
+    }
     if (result.success && result.response) {
       console.log(result.response);
     } else if (!result.success && result.error) {
diff --git a/src/cli/commands/invoke/types.ts b/src/cli/commands/invoke/types.ts
index b242a83d..c7234bb5 100644
--- a/src/cli/commands/invoke/types.ts
+++ b/src/cli/commands/invoke/types.ts
@@ -14,4 +14,6 @@ export interface InvokeResult {
   response?: string;
   error?: string;
   logFilePath?: string;
+  /** Model provider (e.g., "Anthropic", "Bedrock") */
+  providerInfo?: string;
 }
diff --git a/src/cli/operations/agent/generate/schema-mapper.ts b/src/cli/operations/agent/generate/schema-mapper.ts
index f8fca2e1..ad1a6323 100644
--- a/src/cli/operations/agent/generate/schema-mapper.ts
+++ b/src/cli/operations/agent/generate/schema-mapper.ts
@@ -112,6 +112,7 @@ export function mapGenerateConfigToAgent(config: GenerateConfig): AgentEnvSpec {
     codeLocation: codeLocation as DirectoryPath,
     runtimeVersion: DEFAULT_PYTHON_VERSION,
     networkMode: DEFAULT_NETWORK_MODE,
+    modelProvider: config.modelProvider,
   };
 }
 
diff --git a/src/cli/tui/hooks/useDevServer.ts b/src/cli/tui/hooks/useDevServer.ts
index a2659ba4..569a6a1b 100644
--- a/src/cli/tui/hooks/useDevServer.ts
+++ b/src/cli/tui/hooks/useDevServer.ts
@@ -248,5 +248,6 @@ export function useDevServer(options: { workingDir: string; port: number; agentN
     stop,
     logFilePath: loggerRef.current?.getRelativeLogPath(),
     hasMemory: (project?.memories?.length ?? 0) > 0,
+    modelProvider: project?.agents.find(a => a.name === config?.agentName)?.modelProvider,
   };
 }
diff --git a/src/cli/tui/screens/add/AddFlow.tsx b/src/cli/tui/screens/add/AddFlow.tsx
index 70487f30..236976f3 100644
--- a/src/cli/tui/screens/add/AddFlow.tsx
+++ b/src/cli/tui/screens/add/AddFlow.tsx
@@ -1,3 +1,4 @@
+import { DEFAULT_MODEL_IDS } from '../../../../schema';
 import { computeDefaultCredentialEnvVarName } from '../../../operations/identity/create-identity';
 import { ErrorPrompt } from '../../components';
 import { useAvailableAgents } from '../../hooks/useCreateMcp';
@@ -101,6 +102,11 @@ function AgentAddedSummary({
         </Box>
       )}
 
+      <Box>
+        <Text dimColor>Model: </Text>
+        <Text>{DEFAULT_MODEL_IDS[config.modelProvider]}</Text>
+        <Text dimColor> via {config.modelProvider}</Text>
+      </Box>
       {showEnvVarReminder && envVarName && (
         <Box>
           <Text color="yellow">Note: API key not configured.</Text>
diff --git a/src/cli/tui/screens/agent/AddAgentScreen.tsx b/src/cli/tui/screens/agent/AddAgentScreen.tsx
index 0dac24c8..48c5c06e 100644
--- a/src/cli/tui/screens/agent/AddAgentScreen.tsx
+++ b/src/cli/tui/screens/agent/AddAgentScreen.tsx
@@ -1,6 +1,6 @@
 import { APP_DIR, ConfigIO } from '../../../../lib';
 import type { ModelProvider } from '../../../../schema';
-import { AgentNameSchema } from '../../../../schema';
+import { AgentNameSchema, DEFAULT_MODEL_IDS } from '../../../../schema';
 import { computeDefaultCredentialEnvVarName } from '../../../operations/identity/create-identity';
 import {
   ApiKeySecretInput,
@@ -398,7 +398,10 @@ export function AddAgentScreen({ existingAgentNames, onComplete, onExit }: AddAg
       { label: 'Type', value: 'Bring my own code' },
       { label: 'Code Location', value: byoConfig.codeLocation },
       { label: 'Entrypoint', value: byoConfig.entrypoint },
-      { label: 'Model Provider', value: byoConfig.modelProvider },
+      {
+        label: 'Model Provider',
+        value: `${byoConfig.modelProvider} (${DEFAULT_MODEL_IDS[byoConfig.modelProvider]})`,
+      },
       ...(byoConfig.modelProvider !== 'Bedrock'
         ? [
             {
diff --git a/src/cli/tui/screens/agent/types.ts b/src/cli/tui/screens/agent/types.ts
index 50e6d05e..bf9b6986 100644
--- a/src/cli/tui/screens/agent/types.ts
+++ b/src/cli/tui/screens/agent/types.ts
@@ -1,5 +1,5 @@
 import type { ModelProvider, PythonRuntime, SDKFramework, TargetLanguage } from '../../../../schema';
-import { getSupportedModelProviders } from '../../../../schema';
+import { DEFAULT_MODEL_IDS, getSupportedModelProviders } from '../../../../schema';
 import type { MemoryOption } from '../generate/types';
 
 // ─────────────────────────────────────────────────────────────────────────────
@@ -89,10 +89,14 @@ export const FRAMEWORK_OPTIONS = [
 ] as const;
 
 export const MODEL_PROVIDER_OPTIONS = [
-  { id: 'Bedrock', title: 'Amazon Bedrock', description: 'AWS managed model inference' },
-  { id: 'Anthropic', title: 'Anthropic', description: 'Claude models via Anthropic API' },
-  { id: 'OpenAI', title: 'OpenAI', description: 'GPT models via OpenAI API' },
-  { id: 'Gemini', title: 'Google Gemini', description: 'Gemini models via Google API' },
+  { id: 'Bedrock', title: `Amazon Bedrock (${DEFAULT_MODEL_IDS.Bedrock})`, description: 'AWS managed model inference' },
+  {
+    id: 'Anthropic',
+    title: `Anthropic (${DEFAULT_MODEL_IDS.Anthropic})`,
+    description: 'Claude models via Anthropic API',
+  },
+  { id: 'OpenAI', title: `OpenAI (${DEFAULT_MODEL_IDS.OpenAI})`, description: 'GPT models via OpenAI API' },
+  { id: 'Gemini', title: `Google Gemini (${DEFAULT_MODEL_IDS.Gemini})`, description: 'Gemini models via Google API' },
 ] as const;
 
 /**
diff --git a/src/cli/tui/screens/create/CreateScreen.tsx b/src/cli/tui/screens/create/CreateScreen.tsx
index 84ae7f55..79a62c84 100644
--- a/src/cli/tui/screens/create/CreateScreen.tsx
+++ b/src/cli/tui/screens/create/CreateScreen.tsx
@@ -1,4 +1,4 @@
-import { ProjectNameSchema } from '../../../../schema';
+import { DEFAULT_MODEL_IDS, ProjectNameSchema } from '../../../../schema';
 import { validateFolderNotExists } from '../../../commands/create/validate';
 import { computeDefaultCredentialEnvVarName } from '../../../operations/identity/create-identity';
 import {
@@ -48,11 +48,14 @@ function buildExitMessage(projectName: string, steps: Step[], agentConfig: AddAg
   if (agentConfig?.agentType === 'create') {
     const frameworkOption = FRAMEWORK_OPTIONS.find(o => o.id === agentConfig.framework);
     const frameworkLabel = frameworkOption?.title ?? agentConfig.framework;
+    const modelName = DEFAULT_MODEL_IDS[agentConfig.modelProvider];
     const agentPath = `app/${agentConfig.name}/`;
     const agentcorePath = 'agentcore/';
     const maxPathLen = Math.max(agentPath.length, agentcorePath.length);
     lines.push(`  ${agentPath.padEnd(maxPathLen)} \x1b[2m${agentConfig.language} agent (${frameworkLabel})\x1b[0m`);
     lines.push(`  ${agentcorePath.padEnd(maxPathLen)} \x1b[2mConfig and CDK project\x1b[0m`);
+    lines.push('');
+    lines.push(`\x1b[2mModel:\x1b[0m ${modelName} \x1b[2mvia ${agentConfig.modelProvider}\x1b[0m`);
   } else if (agentConfig?.agentType === 'byo') {
     const agentPath = agentConfig.codeLocation;
     const agentcorePath = 'agentcore/';
@@ -169,6 +172,13 @@ function CreatedSummary({ projectName, agentConfig }: { projectName: string; age
           </Text>
         </Box>
 
+      {isCreate && agentConfig && (
+        <Box>
+          <Text dimColor>Model: </Text>
+          <Text>{DEFAULT_MODEL_IDS[agentConfig.modelProvider]}</Text>
+          <Text dimColor> via {agentConfig.modelProvider}</Text>
+        </Box>
+      )}
       {isByo && agentConfig && (
         <Box>
           <Text>
diff --git a/src/cli/tui/screens/dev/DevScreen.tsx b/src/cli/tui/screens/dev/DevScreen.tsx
index e376551f..b4d91338 100644
--- a/src/cli/tui/screens/dev/DevScreen.tsx
+++ b/src/cli/tui/screens/dev/DevScreen.tsx
@@ -160,6 +160,7 @@ export function DevScreen(props: DevScreenProps) {
     stop,
     logFilePath,
     hasMemory,
+    modelProvider,
   } = useDevServer({
     workingDir,
     port: props.port ?? 8080,
@@ -383,6 +384,12 @@
         <Text dimColor>Agent: </Text>
         <Text>{config?.agentName}</Text>
       </Box>
+      {modelProvider && (
+        <Box>
+          <Text dimColor>Provider: </Text>
+          <Text>{modelProvider}</Text>
+        </Box>
+      )}
       <Box>
         <Text dimColor>Server: </Text>
         <Text>http://localhost:{actualPort}/invocations</Text>
diff --git a/src/cli/tui/screens/generate/GenerateWizardUI.tsx b/src/cli/tui/screens/generate/GenerateWizardUI.tsx
index 9947cf05..a80753f5 100644
--- a/src/cli/tui/screens/generate/GenerateWizardUI.tsx
+++ b/src/cli/tui/screens/generate/GenerateWizardUI.tsx
@@ -1,5 +1,5 @@
 import type { ModelProvider } from '../../../../schema';
-import { ProjectNameSchema } from '../../../../schema';
+import { DEFAULT_MODEL_IDS, ProjectNameSchema } from '../../../../schema';
 import { computeDefaultCredentialEnvVarName } from '../../../operations/identity/create-identity';
 import { ApiKeySecretInput, Panel, SelectList, StepIndicator, TextInput } from '../../components';
 import type { SelectableItem } from '../../components';
@@ -202,7 +202,9 @@ function ConfirmView({ config, credentialProjectName }: { config: GenerateConfig
       </Box>
       <Box>
         <Text dimColor>Model Provider: </Text>
-        <Text>{config.modelProvider}</Text>
+        <Text>
+          {config.modelProvider} ({DEFAULT_MODEL_IDS[config.modelProvider]})
+        </Text>
       </Box>
       {config.modelProvider !== 'Bedrock' && (
         <Box>
diff --git a/src/cli/tui/screens/generate/types.ts b/src/cli/tui/screens/generate/types.ts
index f27f4574..5c1510ed 100644
--- a/src/cli/tui/screens/generate/types.ts
+++ b/src/cli/tui/screens/generate/types.ts
@@ -1,5 +1,5 @@
 import type { ModelProvider, SDKFramework, TargetLanguage } from '../../../../schema';
-import { getSupportedModelProviders } from '../../../../schema';
+import { DEFAULT_MODEL_IDS, getSupportedModelProviders } from '../../../../schema';
 
 export type GenerateStep = 'projectName' | 'language' | 'sdk' | 'modelProvider' | 'apiKey' | 'memory' | 'confirm';
 
@@ -51,10 +51,14 @@ export const SDK_OPTIONS = [
 ] as const;
 
 export const MODEL_PROVIDER_OPTIONS = [
-  { id: 'Bedrock', title: 'Amazon Bedrock', description: 'AWS managed model inference' },
-  { id: 'Anthropic', title: 'Anthropic', description: 'Claude models via Anthropic API' },
-  { id: 'OpenAI', title: 'OpenAI', description: 'GPT models via OpenAI API' },
-  { id: 'Gemini', title: 'Google Gemini', description: 'Gemini models via Google API' },
+  { id: 'Bedrock', title: `Amazon Bedrock (${DEFAULT_MODEL_IDS.Bedrock})`, description: 'AWS managed model inference' },
+  {
+    id: 'Anthropic',
+    title: `Anthropic (${DEFAULT_MODEL_IDS.Anthropic})`,
+    description: 'Claude models via Anthropic API',
+  },
+  { id: 'OpenAI', title: `OpenAI (${DEFAULT_MODEL_IDS.OpenAI})`, description: 'GPT models via OpenAI API' },
+  { id: 'Gemini', title: `Google Gemini (${DEFAULT_MODEL_IDS.Gemini})`, description: 'Gemini models via Google API' },
 ] as const;
 
 /**
diff --git a/src/cli/tui/screens/invoke/InvokeScreen.tsx b/src/cli/tui/screens/invoke/InvokeScreen.tsx
index ed9a2319..ad481605 100644
--- a/src/cli/tui/screens/invoke/InvokeScreen.tsx
+++ b/src/cli/tui/screens/invoke/InvokeScreen.tsx
@@ -285,6 +285,12 @@ export function InvokeScreen({
           <Text>{agent?.name}</Text>
         </Box>
       )}
+      {mode !== 'select-agent' && agent?.modelProvider && (
+        <Box>
+          <Text dimColor>Provider: </Text>
+          <Text>{agent.modelProvider}</Text>
+        </Box>
+      )}
       <Box>
         <Text dimColor>Target: </Text>
         <Text>{config.target.region}</Text>
diff --git a/src/cli/tui/screens/invoke/useInvokeFlow.ts b/src/cli/tui/screens/invoke/useInvokeFlow.ts
index 131122b7..74c49ed7 100644
--- a/src/cli/tui/screens/invoke/useInvokeFlow.ts
+++ b/src/cli/tui/screens/invoke/useInvokeFlow.ts
@@ -2,6 +2,7 @@ import { ConfigIO } from '../../../../lib';
 import type {
   AgentCoreDeployedState,
   AwsDeploymentTarget,
+  ModelProvider,
   AgentCoreProjectSpec as _AgentCoreProjectSpec,
 } from '../../../../schema';
 import { invokeAgentRuntimeStreaming } from '../../../aws';
@@ -11,7 +12,7 @@ import { generateSessionId } from '../../../operations/session';
 import { useCallback, useEffect, useRef, useState } from 'react';
 
 export interface InvokeConfig {
-  agents: { name: string; state: AgentCoreDeployedState }[];
+  agents: { name: string; state: AgentCoreDeployedState; modelProvider?: ModelProvider }[];
   target: AwsDeploymentTarget;
   targetName: string;
   projectName: string;
@@ -77,7 +78,7 @@ export function useInvokeFlow(options: InvokeFlowOptions = {}): InvokeFlowState
     for (const agent of project.agents) {
       const state = targetState?.resources?.agents?.[agent.name];
       if (state) {
-        agents.push({ name: agent.name, state });
+        agents.push({ name: agent.name, state, modelProvider: agent.modelProvider });
       }
     }
 
diff --git a/src/schema/constants.ts b/src/schema/constants.ts
index 7ab4bb60..cff05c1b 100644
--- a/src/schema/constants.ts
+++ b/src/schema/constants.ts
@@ -13,6 +13,17 @@ export type TargetLanguage = z.infer<typeof TargetLanguageSchema>;
 export const ModelProviderSchema = z.enum(['Bedrock', 'Gemini', 'OpenAI', 'Anthropic']);
 export type ModelProvider = z.infer<typeof ModelProviderSchema>;
 
+/**
+ * Default model IDs used for each provider.
+ * These are the models generated in agent templates.
+ */
+export const DEFAULT_MODEL_IDS: Record<ModelProvider, string> = {
+  Bedrock: 'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
+  Anthropic: 'claude-sonnet-4-5-20250929',
+  OpenAI: 'gpt-4.1',
+  Gemini: 'gemini-2.5-flash',
+};
+
 /**
  * Matrix defining which model providers are supported for each SDK framework.
  * - Most SDKs support all 4 providers (Bedrock, Anthropic, OpenAI, Gemini)
diff --git a/src/schema/schemas/agent-env.ts b/src/schema/schemas/agent-env.ts
index f0c72d9d..9f01a8e0 100644
--- a/src/schema/schemas/agent-env.ts
+++ b/src/schema/schemas/agent-env.ts
@@ -3,7 +3,11 @@
  *
  * @module agent-env
  */
-import { NetworkModeSchema, RuntimeVersionSchema as RuntimeVersionSchemaFromConstants } from '../constants';
+import {
+  ModelProviderSchema,
+  NetworkModeSchema,
+  RuntimeVersionSchema as RuntimeVersionSchemaFromConstants,
+} from '../constants';
 import type { DirectoryPath, FilePath } from '../types';
 import { z } from 'zod';
 
@@ -111,6 +115,8 @@ export const AgentEnvSpecSchema = z.object({
   networkMode: NetworkModeSchema.optional(),
   /** Instrumentation settings for observability. Defaults to OTel enabled. */
   instrumentation: InstrumentationSchema.optional(),
+  /** Model provider used by this agent. Optional for backwards compatibility. */
+  modelProvider: ModelProviderSchema.optional(),
 });
 
 export type AgentEnvSpec = z.infer<typeof AgentEnvSpecSchema>;